1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // RUN: %clang_cc1 -verify -fopenmp -fnoopenmp-use-tls -x c++ -triple x86_64-linux -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
3 // RUN: %clang_cc1 -fopenmp -fnoopenmp-use-tls -x c++ -std=c++11 -triple x86_64-linux -emit-pch -o %t %s
4 // RUN: %clang_cc1 -fopenmp -fnoopenmp-use-tls -x c++ -triple x86_64-linux -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
5 // RUN: %clang_cc1 -verify -fopenmp -fnoopenmp-use-tls -x c++ -std=c++11 -DLAMBDA -triple x86_64-linux -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
6 // RUN: %clang_cc1 -verify -fopenmp -fnoopenmp-use-tls -x c++ -fblocks -DBLOCKS -triple x86_64-linux -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK4
7 // RUN: %clang_cc1 -verify -fopenmp -fnoopenmp-use-tls -x c++ -std=c++11 -DARRAY -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
8 
9 // RUN: %clang_cc1 -verify -fopenmp-simd -fnoopenmp-use-tls -x c++ -triple x86_64-linux -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
10 // RUN: %clang_cc1 -fopenmp-simd -fnoopenmp-use-tls -x c++ -std=c++11 -triple x86_64-linux -emit-pch -o %t %s
11 // RUN: %clang_cc1 -fopenmp-simd -fnoopenmp-use-tls -x c++ -triple x86_64-linux -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
12 // RUN: %clang_cc1 -verify -fopenmp-simd -fnoopenmp-use-tls -x c++ -std=c++11 -DLAMBDA -triple x86_64-linux -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
13 // RUN: %clang_cc1 -verify -fopenmp-simd -fnoopenmp-use-tls -x c++ -fblocks -DBLOCKS -triple x86_64-linux -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
14 // RUN: %clang_cc1 -verify -fopenmp-simd -fnoopenmp-use-tls -x c++ -std=c++11 -DARRAY -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
15 
16 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-linux -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
17 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-linux -emit-pch -o %t %s
18 // RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-linux -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12
19 // RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-linux -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK13
20 // RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-linux -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK14
21 // RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DARRAY -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK15
22 // RUN: %clang_cc1 -verify -fopenmp -x c++ -DNESTED -triple x86_64-linux -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK16
23 
24 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-linux -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
25 // RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-linux -emit-pch -o %t %s
26 // RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-linux -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
27 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-linux -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
28 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-linux -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
29 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DARRAY -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
30 // expected-no-diagnostics
31 #if !defined(ARRAY) && !defined(NESTED)
32 #ifndef HEADER
33 #define HEADER
34 
35 volatile int g __attribute__((aligned(128))) = 1212;
36 #pragma omp threadprivate(g)
37 
38 template <class T>
39 struct S {
40   T f;
41   S(T a) : f(a + g) {}
42   S() : f(g) {}
43   S &operator=(const S &) { return *this; };
44   operator T() { return T(); }
45   ~S() {}
46 };
47 
48 
49 
50 template <typename T>
51 T tmain() {
52   S<T> test;
53   test = S<T>();
54   static T t_var __attribute__((aligned(128))) = 333;
55   static T vec[] __attribute__((aligned(128))) = {3, 3};
56   static S<T> s_arr[] __attribute__((aligned(128))) = {1, 2};
57   static S<T> var __attribute__((aligned(128))) (3);
58 #pragma omp threadprivate(t_var, vec, s_arr, var)
59 #pragma omp parallel copyin(t_var, vec, s_arr, var)
60   {
61     vec[0] = t_var;
62     s_arr[0] = var;
63   }
64 #pragma omp parallel copyin(t_var)
65   {}
66   return T();
67 }
68 
69 int main() {
70 #ifdef LAMBDA
71   [&]() {
72 
73 
74 #pragma omp parallel copyin(g)
75   {
76 
77     // threadprivate_g = g;
78 
79 
80     g = 1;
81 
82     [&]() {
83       g = 2;
84 
85     }();
86   }
87   }();
88   return 0;
89 #elif defined(BLOCKS)
90 
91   ^{
92 
93 
94 #pragma omp parallel copyin(g)
95   {
96 
97     // threadprivate_g = g;
98 
99 
100     g = 1;
101 
102 
103     ^{
104       g = 2;
105 
106     }();
107   }
108   }();
109   return 0;
110 #else
111   S<float> test;
112   test = S<float>();
113   static int t_var = 1122;
114   static int vec[] = {1, 2};
115   static S<float> s_arr[] = {1, 2};
116   static S<float> var(3);
117 #pragma omp threadprivate(t_var, vec, s_arr, var)
118 #pragma omp parallel copyin(t_var, vec, s_arr, var)
119   {
120     vec[0] = t_var;
121     s_arr[0] = var;
122   }
123 #pragma omp parallel copyin(t_var) default(none)
124   ++t_var;
125   return tmain<int>();
126 #endif
127 }
128 
129 
130 
131 
132 
133 // threadprivate_t_var = t_var;
134 
135 
136 
137 // threadprivate_vec = vec;
138 
139 
140 // threadprivate_s_arr = s_arr;
141 
142 
143 // threadprivate_var = var;
144 
145 
146 
147 
148 
149 
150 // threadprivate_t_var = t_var;
151 
152 
153 
154 
155 
156 
157 
158 // threadprivate_t_var = t_var;
159 
160 
161 
162 // threadprivate_vec = vec;
163 
164 
165 // threadprivate_s_arr = s_arr;
166 
167 
168 // threadprivate_var = var;
169 
170 
171 
172 
173 
174 
175 // threadprivate_t_var = t_var;
176 
177 
178 
179 
180 
181 #endif
182 #elif defined(ARRAY)
183 
184 struct St {
185   int a, b;
186   St() : a(0), b(0) {}
187   St &operator=(const St &) { return *this; };
188   ~St() {}
189 };
190 
191 void array_func() {
192   static int a[2];
193   static St s[2];
194 
195 
196 #pragma omp threadprivate(a, s)
197 #pragma omp parallel copyin(a, s)
198   ;
199 }
200 #elif defined(NESTED)
201 int t_init();
202 int t = t_init();
203 #pragma omp threadprivate(t)
204 void foo() {
205 #pragma omp parallel
206 #pragma omp parallel copyin(t)
207   ++t;
208 }
209 
210 #endif // NESTED
211 
212 // CHECK1-LABEL: define {{[^@]+}}@main
213 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
214 // CHECK1-NEXT:  entry:
215 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
216 // CHECK1-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
217 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S]], align 4
218 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
219 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
220 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]])
221 // CHECK1-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]], %struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]])
222 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR3:[0-9]+]]
223 // CHECK1-NEXT:    [[TMP0:%.*]] = load atomic i8, i8* bitcast (i64* @_ZGVZ4mainE5s_arr to i8*) acquire, align 8
224 // CHECK1-NEXT:    [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
225 // CHECK1-NEXT:    br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !prof [[PROF2:![0-9]+]]
226 // CHECK1:       init.check:
227 // CHECK1-NEXT:    [[TMP1:%.*]] = call i32 @__cxa_guard_acquire(i64* @_ZGVZ4mainE5s_arr) #[[ATTR3]]
228 // CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
229 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[INIT:%.*]], label [[INIT_END]]
230 // CHECK1:       init:
231 // CHECK1-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
232 // CHECK1-NEXT:    call void @__kmpc_threadprivate_register(%struct.ident_t* @[[GLOB1]], i8* bitcast ([2 x %struct.S]* @_ZZ4mainE5s_arr to i8*), i8* (i8*)* @.__kmpc_global_ctor_., i8* (i8*, i8*)* null, void (i8*)* @.__kmpc_global_dtor_.)
233 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i64 0, i64 0), float 1.000000e+00)
234 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i64 0, i64 1), float 2.000000e+00)
235 // CHECK1-NEXT:    [[TMP3:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR3]]
236 // CHECK1-NEXT:    call void @__cxa_guard_release(i64* @_ZGVZ4mainE5s_arr) #[[ATTR3]]
237 // CHECK1-NEXT:    br label [[INIT_END]]
238 // CHECK1:       init.end:
239 // CHECK1-NEXT:    [[TMP4:%.*]] = load atomic i8, i8* bitcast (i64* @_ZGVZ4mainE3var to i8*) acquire, align 8
240 // CHECK1-NEXT:    [[GUARD_UNINITIALIZED1:%.*]] = icmp eq i8 [[TMP4]], 0
241 // CHECK1-NEXT:    br i1 [[GUARD_UNINITIALIZED1]], label [[INIT_CHECK2:%.*]], label [[INIT_END5:%.*]], !prof [[PROF2]]
242 // CHECK1:       init.check2:
243 // CHECK1-NEXT:    [[TMP5:%.*]] = call i32 @__cxa_guard_acquire(i64* @_ZGVZ4mainE3var) #[[ATTR3]]
244 // CHECK1-NEXT:    [[TOBOOL3:%.*]] = icmp ne i32 [[TMP5]], 0
245 // CHECK1-NEXT:    br i1 [[TOBOOL3]], label [[INIT4:%.*]], label [[INIT_END5]]
246 // CHECK1:       init4:
247 // CHECK1-NEXT:    [[TMP6:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
248 // CHECK1-NEXT:    call void @__kmpc_threadprivate_register(%struct.ident_t* @[[GLOB1]], i8* bitcast (%struct.S* @_ZZ4mainE3var to i8*), i8* (i8*)* @.__kmpc_global_ctor_..1, i8* (i8*, i8*)* null, void (i8*)* @.__kmpc_global_dtor_..2)
249 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) @_ZZ4mainE3var, float 3.000000e+00)
250 // CHECK1-NEXT:    [[TMP7:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @_ZZ4mainE3var to i8*), i8* @__dso_handle) #[[ATTR3]]
251 // CHECK1-NEXT:    call void @__cxa_guard_release(i64* @_ZGVZ4mainE3var) #[[ATTR3]]
252 // CHECK1-NEXT:    br label [[INIT_END5]]
253 // CHECK1:       init.end5:
254 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
255 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*))
256 // CHECK1-NEXT:    [[CALL6:%.*]] = call i32 @_Z5tmainIiET_v()
257 // CHECK1-NEXT:    store i32 [[CALL6]], i32* [[RETVAL]], align 4
258 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR3]]
259 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[RETVAL]], align 4
260 // CHECK1-NEXT:    ret i32 [[TMP8]]
261 //
262 //
263 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
264 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
265 // CHECK1-NEXT:  entry:
266 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
267 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
268 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
269 // CHECK1-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
270 // CHECK1-NEXT:    ret void
271 //
272 //
273 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEaSERKS0_
274 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR2:[0-9]+]] comdat align 2 {
275 // CHECK1-NEXT:  entry:
276 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
277 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S*, align 8
278 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
279 // CHECK1-NEXT:    store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
280 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
281 // CHECK1-NEXT:    ret %struct.S* [[THIS1]]
282 //
283 //
284 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
285 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
286 // CHECK1-NEXT:  entry:
287 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
288 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
289 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
290 // CHECK1-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]]
291 // CHECK1-NEXT:    ret void
292 //
293 //
294 // CHECK1-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_.
295 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4:[0-9]+]] section ".text.startup" {
296 // CHECK1-NEXT:  entry:
297 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
298 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
299 // CHECK1-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
300 // CHECK1-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to [2 x %struct.S]*
301 // CHECK1-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP2]], i64 0, i64 0
302 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
303 // CHECK1-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
304 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
305 // CHECK1-NEXT:    [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
306 // CHECK1-NEXT:    ret i8* [[TMP3]]
307 //
308 //
309 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
310 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
311 // CHECK1-NEXT:  entry:
312 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
313 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
314 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
315 // CHECK1-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
316 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
317 // CHECK1-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
318 // CHECK1-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
319 // CHECK1-NEXT:    ret void
320 //
321 //
322 // CHECK1-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_.
323 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
324 // CHECK1-NEXT:  entry:
325 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
326 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
327 // CHECK1-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
328 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = bitcast i8* [[TMP1]] to %struct.S*
329 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 2
330 // CHECK1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
331 // CHECK1:       arraydestroy.body:
332 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP2]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
333 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
334 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]]
335 // CHECK1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
336 // CHECK1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
337 // CHECK1:       arraydestroy.done1:
338 // CHECK1-NEXT:    ret void
339 //
340 //
341 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
342 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
343 // CHECK1-NEXT:  entry:
344 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
345 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
346 // CHECK1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
347 // CHECK1:       arraydestroy.body:
348 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
349 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
350 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]]
351 // CHECK1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0)
352 // CHECK1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
353 // CHECK1:       arraydestroy.done1:
354 // CHECK1-NEXT:    ret void
355 //
356 //
357 // CHECK1-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..1
358 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
359 // CHECK1-NEXT:  entry:
360 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
361 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
362 // CHECK1-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
363 // CHECK1-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct.S*
364 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[TMP2]], float 3.000000e+00)
365 // CHECK1-NEXT:    [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
366 // CHECK1-NEXT:    ret i8* [[TMP3]]
367 //
368 //
369 // CHECK1-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..2
370 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
371 // CHECK1-NEXT:  entry:
372 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
373 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
374 // CHECK1-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
375 // CHECK1-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct.S*
376 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TMP2]]) #[[ATTR3]]
377 // CHECK1-NEXT:    ret void
378 //
379 //
380 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
381 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR5:[0-9]+]] {
382 // CHECK1-NEXT:  entry:
383 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
384 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
385 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
386 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
387 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
388 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
389 // CHECK1-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @_ZZ4mainE5t_var to i8*), i64 4, i8*** @_ZZ4mainE5t_var.cache.)
390 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
391 // CHECK1-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
392 // CHECK1-NEXT:    [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @_ZZ4mainE5t_var to i64), [[TMP4]]
393 // CHECK1-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
394 // CHECK1:       copyin.not.master:
395 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* @_ZZ4mainE5t_var, align 4
396 // CHECK1-NEXT:    store i32 [[TMP6]], i32* [[TMP3]], align 4
397 // CHECK1-NEXT:    [[TMP7:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x i32]* @_ZZ4mainE3vec to i8*), i64 8, i8*** @_ZZ4mainE3vec.cache.)
398 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to [2 x i32]*
399 // CHECK1-NEXT:    [[TMP9:%.*]] = bitcast [2 x i32]* [[TMP8]] to i8*
400 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP9]], i8* align 4 bitcast ([2 x i32]* @_ZZ4mainE3vec to i8*), i64 8, i1 false)
401 // CHECK1-NEXT:    [[TMP10:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x %struct.S]* @_ZZ4mainE5s_arr to i8*), i64 8, i8*** @_ZZ4mainE5s_arr.cache.)
402 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to [2 x %struct.S]*
403 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP11]], i32 0, i32 0
404 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 2
405 // CHECK1-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP12]]
406 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
407 // CHECK1:       omp.arraycpy.body:
408 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0), [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
409 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
410 // CHECK1-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]])
411 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
412 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
413 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
414 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
415 // CHECK1:       omp.arraycpy.done1:
416 // CHECK1-NEXT:    [[TMP13:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (%struct.S* @_ZZ4mainE3var to i8*), i64 4, i8*** @_ZZ4mainE3var.cache.)
417 // CHECK1-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S*
418 // CHECK1-NEXT:    [[CALL2:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP14]], %struct.S* nonnull align 4 dereferenceable(4) @_ZZ4mainE3var)
419 // CHECK1-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
420 // CHECK1:       copyin.not.master.end:
421 // CHECK1-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
422 // CHECK1-NEXT:    [[TMP15:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @_ZZ4mainE5t_var to i8*), i64 4, i8*** @_ZZ4mainE5t_var.cache.)
423 // CHECK1-NEXT:    [[TMP16:%.*]] = bitcast i8* [[TMP15]] to i32*
424 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
425 // CHECK1-NEXT:    [[TMP18:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x i32]* @_ZZ4mainE3vec to i8*), i64 8, i8*** @_ZZ4mainE3vec.cache.)
426 // CHECK1-NEXT:    [[TMP19:%.*]] = bitcast i8* [[TMP18]] to [2 x i32]*
427 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP19]], i64 0, i64 0
428 // CHECK1-NEXT:    store i32 [[TMP17]], i32* [[ARRAYIDX]], align 4
429 // CHECK1-NEXT:    [[TMP20:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (%struct.S* @_ZZ4mainE3var to i8*), i64 4, i8*** @_ZZ4mainE3var.cache.)
430 // CHECK1-NEXT:    [[TMP21:%.*]] = bitcast i8* [[TMP20]] to %struct.S*
431 // CHECK1-NEXT:    [[TMP22:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x %struct.S]* @_ZZ4mainE5s_arr to i8*), i64 8, i8*** @_ZZ4mainE5s_arr.cache.)
432 // CHECK1-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to [2 x %struct.S]*
433 // CHECK1-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP23]], i64 0, i64 0
434 // CHECK1-NEXT:    [[CALL4:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYIDX3]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP21]])
435 // CHECK1-NEXT:    ret void
436 //
437 //
438 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
439 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR5]] {
440 // CHECK1-NEXT:  entry:
441 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
442 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
443 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
444 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
445 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
446 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
447 // CHECK1-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @_ZZ4mainE5t_var to i8*), i64 4, i8*** @_ZZ4mainE5t_var.cache.)
448 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
449 // CHECK1-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
450 // CHECK1-NEXT:    [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @_ZZ4mainE5t_var to i64), [[TMP4]]
451 // CHECK1-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
452 // CHECK1:       copyin.not.master:
453 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* @_ZZ4mainE5t_var, align 4
454 // CHECK1-NEXT:    store i32 [[TMP6]], i32* [[TMP3]], align 4
455 // CHECK1-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
456 // CHECK1:       copyin.not.master.end:
457 // CHECK1-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]])
458 // CHECK1-NEXT:    [[TMP7:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @_ZZ4mainE5t_var to i8*), i64 4, i8*** @_ZZ4mainE5t_var.cache.)
459 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
460 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
461 // CHECK1-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP9]], 1
462 // CHECK1-NEXT:    store i32 [[INC]], i32* [[TMP8]], align 4
463 // CHECK1-NEXT:    ret void
464 //
465 //
466 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
467 // CHECK1-SAME: () #[[ATTR2]] comdat {
468 // CHECK1-NEXT:  entry:
469 // CHECK1-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
470 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S_0]], align 4
471 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
472 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]])
473 // CHECK1-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]], %struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]])
474 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR3]]
475 // CHECK1-NEXT:    [[TMP0:%.*]] = load atomic i8, i8* bitcast (i64* @_ZGVZ5tmainIiET_vE5s_arr to i8*) acquire, align 8
476 // CHECK1-NEXT:    [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
477 // CHECK1-NEXT:    br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !prof [[PROF2]]
478 // CHECK1:       init.check:
479 // CHECK1-NEXT:    [[TMP1:%.*]] = call i32 @__cxa_guard_acquire(i64* @_ZGVZ5tmainIiET_vE5s_arr) #[[ATTR3]]
480 // CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
481 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[INIT:%.*]], label [[INIT_END]]
482 // CHECK1:       init:
483 // CHECK1-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
484 // CHECK1-NEXT:    call void @__kmpc_threadprivate_register(%struct.ident_t* @[[GLOB1]], i8* bitcast ([2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr to i8*), i8* (i8*)* @.__kmpc_global_ctor_..4, i8* (i8*, i8*)* null, void (i8*)* @.__kmpc_global_dtor_..5)
485 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i64 0, i64 0), i32 1)
486 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i64 0, i64 1), i32 2)
487 // CHECK1-NEXT:    [[TMP3:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor.6, i8* null, i8* @__dso_handle) #[[ATTR3]]
488 // CHECK1-NEXT:    call void @__cxa_guard_release(i64* @_ZGVZ5tmainIiET_vE5s_arr) #[[ATTR3]]
489 // CHECK1-NEXT:    br label [[INIT_END]]
490 // CHECK1:       init.end:
491 // CHECK1-NEXT:    [[TMP4:%.*]] = load atomic i8, i8* bitcast (i64* @_ZGVZ5tmainIiET_vE3var to i8*) acquire, align 8
492 // CHECK1-NEXT:    [[GUARD_UNINITIALIZED1:%.*]] = icmp eq i8 [[TMP4]], 0
493 // CHECK1-NEXT:    br i1 [[GUARD_UNINITIALIZED1]], label [[INIT_CHECK2:%.*]], label [[INIT_END5:%.*]], !prof [[PROF2]]
494 // CHECK1:       init.check2:
495 // CHECK1-NEXT:    [[TMP5:%.*]] = call i32 @__cxa_guard_acquire(i64* @_ZGVZ5tmainIiET_vE3var) #[[ATTR3]]
496 // CHECK1-NEXT:    [[TOBOOL3:%.*]] = icmp ne i32 [[TMP5]], 0
497 // CHECK1-NEXT:    br i1 [[TOBOOL3]], label [[INIT4:%.*]], label [[INIT_END5]]
498 // CHECK1:       init4:
499 // CHECK1-NEXT:    [[TMP6:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
500 // CHECK1-NEXT:    call void @__kmpc_threadprivate_register(%struct.ident_t* @[[GLOB1]], i8* bitcast (%struct.S.0* @_ZZ5tmainIiET_vE3var to i8*), i8* (i8*)* @.__kmpc_global_ctor_..7, i8* (i8*, i8*)* null, void (i8*)* @.__kmpc_global_dtor_..8)
501 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) @_ZZ5tmainIiET_vE3var, i32 3)
502 // CHECK1-NEXT:    [[TMP7:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S.0*)* @_ZN1SIiED1Ev to void (i8*)*), i8* bitcast (%struct.S.0* @_ZZ5tmainIiET_vE3var to i8*), i8* @__dso_handle) #[[ATTR3]]
503 // CHECK1-NEXT:    call void @__cxa_guard_release(i64* @_ZGVZ5tmainIiET_vE3var) #[[ATTR3]]
504 // CHECK1-NEXT:    br label [[INIT_END5]]
505 // CHECK1:       init.end5:
506 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*))
507 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*))
508 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR3]]
509 // CHECK1-NEXT:    ret i32 0
510 //
511 //
512 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
513 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
514 // CHECK1-NEXT:  entry:
515 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
516 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
517 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
518 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
519 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
520 // CHECK1-NEXT:    [[TMP1:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* bitcast (i32* @g to i8*), i64 4, i8*** @g.cache.)
521 // CHECK1-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
522 // CHECK1-NEXT:    [[TMP3:%.*]] = load volatile i32, i32* [[TMP2]], align 128
523 // CHECK1-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP3]] to float
524 // CHECK1-NEXT:    store float [[CONV]], float* [[F]], align 4
525 // CHECK1-NEXT:    ret void
526 //
527 //
528 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
529 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
530 // CHECK1-NEXT:  entry:
531 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
532 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
533 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
534 // CHECK1-NEXT:    ret void
535 //
536 //
537 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
538 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
539 // CHECK1-NEXT:  entry:
540 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
541 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
542 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
543 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
544 // CHECK1-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
545 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
546 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
547 // CHECK1-NEXT:    [[TMP1:%.*]] = load float, float* [[A_ADDR]], align 4
548 // CHECK1-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* bitcast (i32* @g to i8*), i64 4, i8*** @g.cache.)
549 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
550 // CHECK1-NEXT:    [[TMP4:%.*]] = load volatile i32, i32* [[TMP3]], align 128
551 // CHECK1-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP4]] to float
552 // CHECK1-NEXT:    [[ADD:%.*]] = fadd float [[TMP1]], [[CONV]]
553 // CHECK1-NEXT:    store float [[ADD]], float* [[F]], align 4
554 // CHECK1-NEXT:    ret void
555 //
556 //
557 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
558 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
559 // CHECK1-NEXT:  entry:
560 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
561 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
562 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
563 // CHECK1-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
564 // CHECK1-NEXT:    ret void
565 //
566 //
567 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEaSERKS0_
568 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR2]] comdat align 2 {
569 // CHECK1-NEXT:  entry:
570 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
571 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S.0*, align 8
572 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
573 // CHECK1-NEXT:    store %struct.S.0* [[TMP0]], %struct.S.0** [[DOTADDR]], align 8
574 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
575 // CHECK1-NEXT:    ret %struct.S.0* [[THIS1]]
576 //
577 //
578 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
579 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
580 // CHECK1-NEXT:  entry:
581 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
582 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
583 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
584 // CHECK1-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]]
585 // CHECK1-NEXT:    ret void
586 //
587 //
588 // CHECK1-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..4
589 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
590 // CHECK1-NEXT:  entry:
591 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
592 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
593 // CHECK1-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
594 // CHECK1-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to [2 x %struct.S.0]*
595 // CHECK1-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP2]], i64 0, i64 0
596 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
597 // CHECK1-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
598 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
599 // CHECK1-NEXT:    [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
600 // CHECK1-NEXT:    ret i8* [[TMP3]]
601 //
602 //
603 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
604 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
605 // CHECK1-NEXT:  entry:
606 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
607 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
608 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
609 // CHECK1-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
610 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
611 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
612 // CHECK1-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
613 // CHECK1-NEXT:    ret void
614 //
615 //
616 // CHECK1-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..5
617 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
618 // CHECK1-NEXT:  entry:
619 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
620 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
621 // CHECK1-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
622 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = bitcast i8* [[TMP1]] to %struct.S.0*
623 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
624 // CHECK1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
625 // CHECK1:       arraydestroy.body:
626 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP2]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
627 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
628 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]]
629 // CHECK1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
630 // CHECK1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
631 // CHECK1:       arraydestroy.done1:
632 // CHECK1-NEXT:    ret void
633 //
634 //
635 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_array_dtor.6
636 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
637 // CHECK1-NEXT:  entry:
638 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
639 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
640 // CHECK1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
641 // CHECK1:       arraydestroy.body:
642 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ getelementptr inbounds ([[STRUCT_S_0:%.*]], %struct.S.0* getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
643 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
644 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]]
645 // CHECK1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0)
646 // CHECK1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
647 // CHECK1:       arraydestroy.done1:
648 // CHECK1-NEXT:    ret void
649 //
650 //
651 // CHECK1-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..7
652 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
653 // CHECK1-NEXT:  entry:
654 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
655 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
656 // CHECK1-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
657 // CHECK1-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct.S.0*
658 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP2]], i32 3)
659 // CHECK1-NEXT:    [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
660 // CHECK1-NEXT:    ret i8* [[TMP3]]
661 //
662 //
663 // CHECK1-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..8
664 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
665 // CHECK1-NEXT:  entry:
666 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
667 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
668 // CHECK1-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
669 // CHECK1-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct.S.0*
670 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP2]]) #[[ATTR3]]
671 // CHECK1-NEXT:    ret void
672 //
673 //
674 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..9
675 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR5]] {
676 // CHECK1-NEXT:  entry:
677 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
678 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
679 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
680 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
681 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
682 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
683 // CHECK1-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @_ZZ5tmainIiET_vE5t_var to i8*), i64 4, i8*** @_ZZ5tmainIiET_vE5t_var.cache.)
684 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
685 // CHECK1-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
686 // CHECK1-NEXT:    [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @_ZZ5tmainIiET_vE5t_var to i64), [[TMP4]]
687 // CHECK1-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
688 // CHECK1:       copyin.not.master:
689 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* @_ZZ5tmainIiET_vE5t_var, align 128
690 // CHECK1-NEXT:    store i32 [[TMP6]], i32* [[TMP3]], align 128
691 // CHECK1-NEXT:    [[TMP7:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x i32]* @_ZZ5tmainIiET_vE3vec to i8*), i64 8, i8*** @_ZZ5tmainIiET_vE3vec.cache.)
692 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to [2 x i32]*
693 // CHECK1-NEXT:    [[TMP9:%.*]] = bitcast [2 x i32]* [[TMP8]] to i8*
694 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP9]], i8* align 128 bitcast ([2 x i32]* @_ZZ5tmainIiET_vE3vec to i8*), i64 8, i1 false)
695 // CHECK1-NEXT:    [[TMP10:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr to i8*), i64 8, i8*** @_ZZ5tmainIiET_vE5s_arr.cache.)
696 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to [2 x %struct.S.0]*
697 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP11]], i32 0, i32 0
698 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr [[STRUCT_S_0:%.*]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
699 // CHECK1-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN]], [[TMP12]]
700 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
701 // CHECK1:       omp.arraycpy.body:
702 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0), [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
703 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
704 // CHECK1-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]])
705 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
706 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
707 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
708 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
709 // CHECK1:       omp.arraycpy.done1:
710 // CHECK1-NEXT:    [[TMP13:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (%struct.S.0* @_ZZ5tmainIiET_vE3var to i8*), i64 4, i8*** @_ZZ5tmainIiET_vE3var.cache.)
711 // CHECK1-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S.0*
712 // CHECK1-NEXT:    [[CALL2:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP14]], %struct.S.0* nonnull align 4 dereferenceable(4) @_ZZ5tmainIiET_vE3var)
713 // CHECK1-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
714 // CHECK1:       copyin.not.master.end:
715 // CHECK1-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]])
716 // CHECK1-NEXT:    [[TMP15:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @_ZZ5tmainIiET_vE5t_var to i8*), i64 4, i8*** @_ZZ5tmainIiET_vE5t_var.cache.)
717 // CHECK1-NEXT:    [[TMP16:%.*]] = bitcast i8* [[TMP15]] to i32*
718 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 128
719 // CHECK1-NEXT:    [[TMP18:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x i32]* @_ZZ5tmainIiET_vE3vec to i8*), i64 8, i8*** @_ZZ5tmainIiET_vE3vec.cache.)
720 // CHECK1-NEXT:    [[TMP19:%.*]] = bitcast i8* [[TMP18]] to [2 x i32]*
721 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP19]], i64 0, i64 0
722 // CHECK1-NEXT:    store i32 [[TMP17]], i32* [[ARRAYIDX]], align 128
723 // CHECK1-NEXT:    [[TMP20:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (%struct.S.0* @_ZZ5tmainIiET_vE3var to i8*), i64 4, i8*** @_ZZ5tmainIiET_vE3var.cache.)
724 // CHECK1-NEXT:    [[TMP21:%.*]] = bitcast i8* [[TMP20]] to %struct.S.0*
725 // CHECK1-NEXT:    [[TMP22:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr to i8*), i64 8, i8*** @_ZZ5tmainIiET_vE5s_arr.cache.)
726 // CHECK1-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to [2 x %struct.S.0]*
727 // CHECK1-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP23]], i64 0, i64 0
728 // CHECK1-NEXT:    [[CALL4:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYIDX3]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP21]])
729 // CHECK1-NEXT:    ret void
730 //
731 //
732 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10
733 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR5]] {
734 // CHECK1-NEXT:  entry:
735 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
736 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
737 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
738 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
739 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
740 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
741 // CHECK1-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @_ZZ5tmainIiET_vE5t_var to i8*), i64 4, i8*** @_ZZ5tmainIiET_vE5t_var.cache.)
742 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
743 // CHECK1-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
744 // CHECK1-NEXT:    [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @_ZZ5tmainIiET_vE5t_var to i64), [[TMP4]]
745 // CHECK1-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
746 // CHECK1:       copyin.not.master:
747 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* @_ZZ5tmainIiET_vE5t_var, align 128
748 // CHECK1-NEXT:    store i32 [[TMP6]], i32* [[TMP3]], align 128
749 // CHECK1-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
750 // CHECK1:       copyin.not.master.end:
751 // CHECK1-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]])
752 // CHECK1-NEXT:    ret void
753 //
754 //
755 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
756 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
757 // CHECK1-NEXT:  entry:
758 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
759 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
760 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
761 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
762 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
763 // CHECK1-NEXT:    [[TMP1:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* bitcast (i32* @g to i8*), i64 4, i8*** @g.cache.)
764 // CHECK1-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
765 // CHECK1-NEXT:    [[TMP3:%.*]] = load volatile i32, i32* [[TMP2]], align 128
766 // CHECK1-NEXT:    store i32 [[TMP3]], i32* [[F]], align 4
767 // CHECK1-NEXT:    ret void
768 //
769 //
770 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
771 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
772 // CHECK1-NEXT:  entry:
773 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
774 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
775 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
776 // CHECK1-NEXT:    ret void
777 //
778 //
779 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
780 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
781 // CHECK1-NEXT:  entry:
782 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
783 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
784 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
785 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
786 // CHECK1-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
787 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
788 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
789 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
790 // CHECK1-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* bitcast (i32* @g to i8*), i64 4, i8*** @g.cache.)
791 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
792 // CHECK1-NEXT:    [[TMP4:%.*]] = load volatile i32, i32* [[TMP3]], align 128
793 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP4]]
794 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[F]], align 4
795 // CHECK1-NEXT:    ret void
796 //
797 //
798 // CHECK2-LABEL: define {{[^@]+}}@main
799 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
800 // CHECK2-NEXT:  entry:
801 // CHECK2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
802 // CHECK2-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
803 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S]], align 4
804 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
805 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
806 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]])
807 // CHECK2-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]], %struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]])
808 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR3:[0-9]+]]
809 // CHECK2-NEXT:    [[TMP0:%.*]] = load atomic i8, i8* bitcast (i64* @_ZGVZ4mainE5s_arr to i8*) acquire, align 8
810 // CHECK2-NEXT:    [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
811 // CHECK2-NEXT:    br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !prof [[PROF2:![0-9]+]]
812 // CHECK2:       init.check:
813 // CHECK2-NEXT:    [[TMP1:%.*]] = call i32 @__cxa_guard_acquire(i64* @_ZGVZ4mainE5s_arr) #[[ATTR3]]
814 // CHECK2-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
815 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[INIT:%.*]], label [[INIT_END]]
816 // CHECK2:       init:
817 // CHECK2-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
818 // CHECK2-NEXT:    call void @__kmpc_threadprivate_register(%struct.ident_t* @[[GLOB1]], i8* bitcast ([2 x %struct.S]* @_ZZ4mainE5s_arr to i8*), i8* (i8*)* @.__kmpc_global_ctor_., i8* (i8*, i8*)* null, void (i8*)* @.__kmpc_global_dtor_.)
819 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i64 0, i64 0), float 1.000000e+00)
820 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i64 0, i64 1), float 2.000000e+00)
821 // CHECK2-NEXT:    [[TMP3:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR3]]
822 // CHECK2-NEXT:    call void @__cxa_guard_release(i64* @_ZGVZ4mainE5s_arr) #[[ATTR3]]
823 // CHECK2-NEXT:    br label [[INIT_END]]
824 // CHECK2:       init.end:
825 // CHECK2-NEXT:    [[TMP4:%.*]] = load atomic i8, i8* bitcast (i64* @_ZGVZ4mainE3var to i8*) acquire, align 8
826 // CHECK2-NEXT:    [[GUARD_UNINITIALIZED1:%.*]] = icmp eq i8 [[TMP4]], 0
827 // CHECK2-NEXT:    br i1 [[GUARD_UNINITIALIZED1]], label [[INIT_CHECK2:%.*]], label [[INIT_END5:%.*]], !prof [[PROF2]]
828 // CHECK2:       init.check2:
829 // CHECK2-NEXT:    [[TMP5:%.*]] = call i32 @__cxa_guard_acquire(i64* @_ZGVZ4mainE3var) #[[ATTR3]]
830 // CHECK2-NEXT:    [[TOBOOL3:%.*]] = icmp ne i32 [[TMP5]], 0
831 // CHECK2-NEXT:    br i1 [[TOBOOL3]], label [[INIT4:%.*]], label [[INIT_END5]]
832 // CHECK2:       init4:
833 // CHECK2-NEXT:    [[TMP6:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
834 // CHECK2-NEXT:    call void @__kmpc_threadprivate_register(%struct.ident_t* @[[GLOB1]], i8* bitcast (%struct.S* @_ZZ4mainE3var to i8*), i8* (i8*)* @.__kmpc_global_ctor_..1, i8* (i8*, i8*)* null, void (i8*)* @.__kmpc_global_dtor_..2)
835 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) @_ZZ4mainE3var, float 3.000000e+00)
836 // CHECK2-NEXT:    [[TMP7:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @_ZZ4mainE3var to i8*), i8* @__dso_handle) #[[ATTR3]]
837 // CHECK2-NEXT:    call void @__cxa_guard_release(i64* @_ZGVZ4mainE3var) #[[ATTR3]]
838 // CHECK2-NEXT:    br label [[INIT_END5]]
839 // CHECK2:       init.end5:
840 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
841 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*))
842 // CHECK2-NEXT:    [[CALL6:%.*]] = call i32 @_Z5tmainIiET_v()
843 // CHECK2-NEXT:    store i32 [[CALL6]], i32* [[RETVAL]], align 4
844 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR3]]
845 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[RETVAL]], align 4
846 // CHECK2-NEXT:    ret i32 [[TMP8]]
847 //
848 //
849 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
850 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
851 // CHECK2-NEXT:  entry:
852 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
853 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
854 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
855 // CHECK2-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
856 // CHECK2-NEXT:    ret void
857 //
858 //
859 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEaSERKS0_
860 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR2:[0-9]+]] comdat align 2 {
861 // CHECK2-NEXT:  entry:
862 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
863 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S*, align 8
864 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
865 // CHECK2-NEXT:    store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
866 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
867 // CHECK2-NEXT:    ret %struct.S* [[THIS1]]
868 //
869 //
870 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
871 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
872 // CHECK2-NEXT:  entry:
873 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
874 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
875 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
876 // CHECK2-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]]
877 // CHECK2-NEXT:    ret void
878 //
879 //
880 // CHECK2-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_.
881 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4:[0-9]+]] section ".text.startup" {
882 // CHECK2-NEXT:  entry:
883 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
884 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
885 // CHECK2-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
886 // CHECK2-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to [2 x %struct.S]*
887 // CHECK2-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP2]], i64 0, i64 0
888 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
889 // CHECK2-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
890 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
891 // CHECK2-NEXT:    [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
892 // CHECK2-NEXT:    ret i8* [[TMP3]]
893 //
894 //
895 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
896 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
897 // CHECK2-NEXT:  entry:
898 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
899 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
900 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
901 // CHECK2-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
902 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
903 // CHECK2-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
904 // CHECK2-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
905 // CHECK2-NEXT:    ret void
906 //
907 //
908 // CHECK2-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_.
909 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
910 // CHECK2-NEXT:  entry:
911 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
912 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
913 // CHECK2-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
914 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = bitcast i8* [[TMP1]] to %struct.S*
915 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 2
916 // CHECK2-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
917 // CHECK2:       arraydestroy.body:
918 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP2]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
919 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
920 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]]
921 // CHECK2-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
922 // CHECK2-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
923 // CHECK2:       arraydestroy.done1:
924 // CHECK2-NEXT:    ret void
925 //
926 //
927 // CHECK2-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
928 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
929 // CHECK2-NEXT:  entry:
930 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
931 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
932 // CHECK2-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
933 // CHECK2:       arraydestroy.body:
934 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
935 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
936 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]]
937 // CHECK2-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0)
938 // CHECK2-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
939 // CHECK2:       arraydestroy.done1:
940 // CHECK2-NEXT:    ret void
941 //
942 //
943 // CHECK2-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..1
944 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
945 // CHECK2-NEXT:  entry:
946 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
947 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
948 // CHECK2-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
949 // CHECK2-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct.S*
950 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[TMP2]], float 3.000000e+00)
951 // CHECK2-NEXT:    [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
952 // CHECK2-NEXT:    ret i8* [[TMP3]]
953 //
954 //
955 // CHECK2-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..2
956 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
957 // CHECK2-NEXT:  entry:
958 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
959 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
960 // CHECK2-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
961 // CHECK2-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct.S*
962 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TMP2]]) #[[ATTR3]]
963 // CHECK2-NEXT:    ret void
964 //
965 //
966 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
967 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR5:[0-9]+]] {
968 // CHECK2-NEXT:  entry:
969 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
970 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
971 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
972 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
973 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
974 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
975 // CHECK2-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @_ZZ4mainE5t_var to i8*), i64 4, i8*** @_ZZ4mainE5t_var.cache.)
976 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
977 // CHECK2-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
978 // CHECK2-NEXT:    [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @_ZZ4mainE5t_var to i64), [[TMP4]]
979 // CHECK2-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
980 // CHECK2:       copyin.not.master:
981 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* @_ZZ4mainE5t_var, align 4
982 // CHECK2-NEXT:    store i32 [[TMP6]], i32* [[TMP3]], align 4
983 // CHECK2-NEXT:    [[TMP7:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x i32]* @_ZZ4mainE3vec to i8*), i64 8, i8*** @_ZZ4mainE3vec.cache.)
984 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to [2 x i32]*
985 // CHECK2-NEXT:    [[TMP9:%.*]] = bitcast [2 x i32]* [[TMP8]] to i8*
986 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP9]], i8* align 4 bitcast ([2 x i32]* @_ZZ4mainE3vec to i8*), i64 8, i1 false)
987 // CHECK2-NEXT:    [[TMP10:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x %struct.S]* @_ZZ4mainE5s_arr to i8*), i64 8, i8*** @_ZZ4mainE5s_arr.cache.)
988 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to [2 x %struct.S]*
989 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP11]], i32 0, i32 0
990 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 2
991 // CHECK2-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP12]]
992 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
993 // CHECK2:       omp.arraycpy.body:
994 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0), [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
995 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
996 // CHECK2-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]])
997 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
998 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
999 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
1000 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
1001 // CHECK2:       omp.arraycpy.done1:
1002 // CHECK2-NEXT:    [[TMP13:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (%struct.S* @_ZZ4mainE3var to i8*), i64 4, i8*** @_ZZ4mainE3var.cache.)
1003 // CHECK2-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S*
1004 // CHECK2-NEXT:    [[CALL2:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP14]], %struct.S* nonnull align 4 dereferenceable(4) @_ZZ4mainE3var)
1005 // CHECK2-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
1006 // CHECK2:       copyin.not.master.end:
1007 // CHECK2-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
1008 // CHECK2-NEXT:    [[TMP15:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @_ZZ4mainE5t_var to i8*), i64 4, i8*** @_ZZ4mainE5t_var.cache.)
1009 // CHECK2-NEXT:    [[TMP16:%.*]] = bitcast i8* [[TMP15]] to i32*
1010 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
1011 // CHECK2-NEXT:    [[TMP18:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x i32]* @_ZZ4mainE3vec to i8*), i64 8, i8*** @_ZZ4mainE3vec.cache.)
1012 // CHECK2-NEXT:    [[TMP19:%.*]] = bitcast i8* [[TMP18]] to [2 x i32]*
1013 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP19]], i64 0, i64 0
1014 // CHECK2-NEXT:    store i32 [[TMP17]], i32* [[ARRAYIDX]], align 4
1015 // CHECK2-NEXT:    [[TMP20:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (%struct.S* @_ZZ4mainE3var to i8*), i64 4, i8*** @_ZZ4mainE3var.cache.)
1016 // CHECK2-NEXT:    [[TMP21:%.*]] = bitcast i8* [[TMP20]] to %struct.S*
1017 // CHECK2-NEXT:    [[TMP22:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x %struct.S]* @_ZZ4mainE5s_arr to i8*), i64 8, i8*** @_ZZ4mainE5s_arr.cache.)
1018 // CHECK2-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to [2 x %struct.S]*
1019 // CHECK2-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP23]], i64 0, i64 0
1020 // CHECK2-NEXT:    [[CALL4:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYIDX3]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP21]])
1021 // CHECK2-NEXT:    ret void
1022 //
1023 //
1024 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
1025 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR5]] {
1026 // CHECK2-NEXT:  entry:
1027 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1028 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1029 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1030 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1031 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1032 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1033 // CHECK2-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @_ZZ4mainE5t_var to i8*), i64 4, i8*** @_ZZ4mainE5t_var.cache.)
1034 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
1035 // CHECK2-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
1036 // CHECK2-NEXT:    [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @_ZZ4mainE5t_var to i64), [[TMP4]]
1037 // CHECK2-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
1038 // CHECK2:       copyin.not.master:
1039 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* @_ZZ4mainE5t_var, align 4
1040 // CHECK2-NEXT:    store i32 [[TMP6]], i32* [[TMP3]], align 4
1041 // CHECK2-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
1042 // CHECK2:       copyin.not.master.end:
1043 // CHECK2-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]])
1044 // CHECK2-NEXT:    [[TMP7:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @_ZZ4mainE5t_var to i8*), i64 4, i8*** @_ZZ4mainE5t_var.cache.)
1045 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1046 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
1047 // CHECK2-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP9]], 1
1048 // CHECK2-NEXT:    store i32 [[INC]], i32* [[TMP8]], align 4
1049 // CHECK2-NEXT:    ret void
1050 //
1051 //
1052 // CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
1053 // CHECK2-SAME: () #[[ATTR2]] comdat {
1054 // CHECK2-NEXT:  entry:
1055 // CHECK2-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1056 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S_0]], align 4
1057 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
1058 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]])
1059 // CHECK2-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]], %struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]])
1060 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR3]]
1061 // CHECK2-NEXT:    [[TMP0:%.*]] = load atomic i8, i8* bitcast (i64* @_ZGVZ5tmainIiET_vE5s_arr to i8*) acquire, align 8
1062 // CHECK2-NEXT:    [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
1063 // CHECK2-NEXT:    br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !prof [[PROF2]]
1064 // CHECK2:       init.check:
1065 // CHECK2-NEXT:    [[TMP1:%.*]] = call i32 @__cxa_guard_acquire(i64* @_ZGVZ5tmainIiET_vE5s_arr) #[[ATTR3]]
1066 // CHECK2-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
1067 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[INIT:%.*]], label [[INIT_END]]
1068 // CHECK2:       init:
1069 // CHECK2-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1070 // CHECK2-NEXT:    call void @__kmpc_threadprivate_register(%struct.ident_t* @[[GLOB1]], i8* bitcast ([2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr to i8*), i8* (i8*)* @.__kmpc_global_ctor_..4, i8* (i8*, i8*)* null, void (i8*)* @.__kmpc_global_dtor_..5)
1071 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i64 0, i64 0), i32 1)
1072 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i64 0, i64 1), i32 2)
1073 // CHECK2-NEXT:    [[TMP3:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor.6, i8* null, i8* @__dso_handle) #[[ATTR3]]
1074 // CHECK2-NEXT:    call void @__cxa_guard_release(i64* @_ZGVZ5tmainIiET_vE5s_arr) #[[ATTR3]]
1075 // CHECK2-NEXT:    br label [[INIT_END]]
1076 // CHECK2:       init.end:
1077 // CHECK2-NEXT:    [[TMP4:%.*]] = load atomic i8, i8* bitcast (i64* @_ZGVZ5tmainIiET_vE3var to i8*) acquire, align 8
1078 // CHECK2-NEXT:    [[GUARD_UNINITIALIZED1:%.*]] = icmp eq i8 [[TMP4]], 0
1079 // CHECK2-NEXT:    br i1 [[GUARD_UNINITIALIZED1]], label [[INIT_CHECK2:%.*]], label [[INIT_END5:%.*]], !prof [[PROF2]]
1080 // CHECK2:       init.check2:
1081 // CHECK2-NEXT:    [[TMP5:%.*]] = call i32 @__cxa_guard_acquire(i64* @_ZGVZ5tmainIiET_vE3var) #[[ATTR3]]
1082 // CHECK2-NEXT:    [[TOBOOL3:%.*]] = icmp ne i32 [[TMP5]], 0
1083 // CHECK2-NEXT:    br i1 [[TOBOOL3]], label [[INIT4:%.*]], label [[INIT_END5]]
1084 // CHECK2:       init4:
1085 // CHECK2-NEXT:    [[TMP6:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1086 // CHECK2-NEXT:    call void @__kmpc_threadprivate_register(%struct.ident_t* @[[GLOB1]], i8* bitcast (%struct.S.0* @_ZZ5tmainIiET_vE3var to i8*), i8* (i8*)* @.__kmpc_global_ctor_..7, i8* (i8*, i8*)* null, void (i8*)* @.__kmpc_global_dtor_..8)
1087 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) @_ZZ5tmainIiET_vE3var, i32 3)
1088 // CHECK2-NEXT:    [[TMP7:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S.0*)* @_ZN1SIiED1Ev to void (i8*)*), i8* bitcast (%struct.S.0* @_ZZ5tmainIiET_vE3var to i8*), i8* @__dso_handle) #[[ATTR3]]
1089 // CHECK2-NEXT:    call void @__cxa_guard_release(i64* @_ZGVZ5tmainIiET_vE3var) #[[ATTR3]]
1090 // CHECK2-NEXT:    br label [[INIT_END5]]
1091 // CHECK2:       init.end5:
1092 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*))
1093 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*))
1094 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR3]]
1095 // CHECK2-NEXT:    ret i32 0
1096 //
1097 //
1098 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
1099 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1100 // CHECK2-NEXT:  entry:
1101 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1102 // CHECK2-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1103 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1104 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1105 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1106 // CHECK2-NEXT:    [[TMP1:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* bitcast (i32* @g to i8*), i64 4, i8*** @g.cache.)
1107 // CHECK2-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
1108 // CHECK2-NEXT:    [[TMP3:%.*]] = load volatile i32, i32* [[TMP2]], align 128
1109 // CHECK2-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP3]] to float
1110 // CHECK2-NEXT:    store float [[CONV]], float* [[F]], align 4
1111 // CHECK2-NEXT:    ret void
1112 //
1113 //
1114 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
1115 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1116 // CHECK2-NEXT:  entry:
1117 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1118 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1119 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1120 // CHECK2-NEXT:    ret void
1121 //
1122 //
1123 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
1124 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1125 // CHECK2-NEXT:  entry:
1126 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1127 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
1128 // CHECK2-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1129 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1130 // CHECK2-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
1131 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1132 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1133 // CHECK2-NEXT:    [[TMP1:%.*]] = load float, float* [[A_ADDR]], align 4
1134 // CHECK2-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* bitcast (i32* @g to i8*), i64 4, i8*** @g.cache.)
1135 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
1136 // CHECK2-NEXT:    [[TMP4:%.*]] = load volatile i32, i32* [[TMP3]], align 128
1137 // CHECK2-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP4]] to float
1138 // CHECK2-NEXT:    [[ADD:%.*]] = fadd float [[TMP1]], [[CONV]]
1139 // CHECK2-NEXT:    store float [[ADD]], float* [[F]], align 4
1140 // CHECK2-NEXT:    ret void
1141 //
1142 //
1143 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
1144 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1145 // CHECK2-NEXT:  entry:
1146 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1147 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1148 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1149 // CHECK2-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
1150 // CHECK2-NEXT:    ret void
1151 //
1152 //
1153 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEaSERKS0_
1154 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR2]] comdat align 2 {
1155 // CHECK2-NEXT:  entry:
1156 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1157 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S.0*, align 8
1158 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1159 // CHECK2-NEXT:    store %struct.S.0* [[TMP0]], %struct.S.0** [[DOTADDR]], align 8
1160 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1161 // CHECK2-NEXT:    ret %struct.S.0* [[THIS1]]
1162 //
1163 //
1164 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
1165 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1166 // CHECK2-NEXT:  entry:
1167 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1168 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1169 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1170 // CHECK2-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]]
1171 // CHECK2-NEXT:    ret void
1172 //
1173 //
1174 // CHECK2-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..4
1175 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
1176 // CHECK2-NEXT:  entry:
1177 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1178 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1179 // CHECK2-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1180 // CHECK2-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to [2 x %struct.S.0]*
1181 // CHECK2-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP2]], i64 0, i64 0
1182 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
1183 // CHECK2-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
1184 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
1185 // CHECK2-NEXT:    [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1186 // CHECK2-NEXT:    ret i8* [[TMP3]]
1187 //
1188 //
1189 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
1190 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1191 // CHECK2-NEXT:  entry:
1192 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1193 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1194 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1195 // CHECK2-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1196 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1197 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1198 // CHECK2-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
1199 // CHECK2-NEXT:    ret void
1200 //
1201 //
1202 // CHECK2-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..5
1203 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
1204 // CHECK2-NEXT:  entry:
1205 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1206 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1207 // CHECK2-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1208 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = bitcast i8* [[TMP1]] to %struct.S.0*
1209 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
1210 // CHECK2-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1211 // CHECK2:       arraydestroy.body:
1212 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP2]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1213 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1214 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]]
1215 // CHECK2-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1216 // CHECK2-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1217 // CHECK2:       arraydestroy.done1:
1218 // CHECK2-NEXT:    ret void
1219 //
1220 //
1221 // CHECK2-LABEL: define {{[^@]+}}@__cxx_global_array_dtor.6
1222 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
1223 // CHECK2-NEXT:  entry:
1224 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1225 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1226 // CHECK2-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1227 // CHECK2:       arraydestroy.body:
1228 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ getelementptr inbounds ([[STRUCT_S_0:%.*]], %struct.S.0* getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1229 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1230 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]]
1231 // CHECK2-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0)
1232 // CHECK2-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1233 // CHECK2:       arraydestroy.done1:
1234 // CHECK2-NEXT:    ret void
1235 //
1236 //
1237 // CHECK2-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..7
1238 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
1239 // CHECK2-NEXT:  entry:
1240 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1241 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1242 // CHECK2-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1243 // CHECK2-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct.S.0*
1244 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP2]], i32 3)
1245 // CHECK2-NEXT:    [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1246 // CHECK2-NEXT:    ret i8* [[TMP3]]
1247 //
1248 //
1249 // CHECK2-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..8
1250 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4]] section ".text.startup" {
1251 // CHECK2-NEXT:  entry:
1252 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1253 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1254 // CHECK2-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1255 // CHECK2-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct.S.0*
1256 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP2]]) #[[ATTR3]]
1257 // CHECK2-NEXT:    ret void
1258 //
1259 //
1260 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..9
1261 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR5]] {
1262 // CHECK2-NEXT:  entry:
1263 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1264 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1265 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1266 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1267 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1268 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1269 // CHECK2-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @_ZZ5tmainIiET_vE5t_var to i8*), i64 4, i8*** @_ZZ5tmainIiET_vE5t_var.cache.)
1270 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
1271 // CHECK2-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
1272 // CHECK2-NEXT:    [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @_ZZ5tmainIiET_vE5t_var to i64), [[TMP4]]
1273 // CHECK2-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
1274 // CHECK2:       copyin.not.master:
1275 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* @_ZZ5tmainIiET_vE5t_var, align 128
1276 // CHECK2-NEXT:    store i32 [[TMP6]], i32* [[TMP3]], align 128
1277 // CHECK2-NEXT:    [[TMP7:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x i32]* @_ZZ5tmainIiET_vE3vec to i8*), i64 8, i8*** @_ZZ5tmainIiET_vE3vec.cache.)
1278 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to [2 x i32]*
1279 // CHECK2-NEXT:    [[TMP9:%.*]] = bitcast [2 x i32]* [[TMP8]] to i8*
1280 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP9]], i8* align 128 bitcast ([2 x i32]* @_ZZ5tmainIiET_vE3vec to i8*), i64 8, i1 false)
1281 // CHECK2-NEXT:    [[TMP10:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr to i8*), i64 8, i8*** @_ZZ5tmainIiET_vE5s_arr.cache.)
1282 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to [2 x %struct.S.0]*
1283 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP11]], i32 0, i32 0
1284 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr [[STRUCT_S_0:%.*]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
1285 // CHECK2-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN]], [[TMP12]]
1286 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
1287 // CHECK2:       omp.arraycpy.body:
1288 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0), [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1289 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1290 // CHECK2-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]])
1291 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
1292 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
1293 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
1294 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
1295 // CHECK2:       omp.arraycpy.done1:
1296 // CHECK2-NEXT:    [[TMP13:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (%struct.S.0* @_ZZ5tmainIiET_vE3var to i8*), i64 4, i8*** @_ZZ5tmainIiET_vE3var.cache.)
1297 // CHECK2-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S.0*
1298 // CHECK2-NEXT:    [[CALL2:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP14]], %struct.S.0* nonnull align 4 dereferenceable(4) @_ZZ5tmainIiET_vE3var)
1299 // CHECK2-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
1300 // CHECK2:       copyin.not.master.end:
1301 // CHECK2-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]])
1302 // CHECK2-NEXT:    [[TMP15:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @_ZZ5tmainIiET_vE5t_var to i8*), i64 4, i8*** @_ZZ5tmainIiET_vE5t_var.cache.)
1303 // CHECK2-NEXT:    [[TMP16:%.*]] = bitcast i8* [[TMP15]] to i32*
1304 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 128
1305 // CHECK2-NEXT:    [[TMP18:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x i32]* @_ZZ5tmainIiET_vE3vec to i8*), i64 8, i8*** @_ZZ5tmainIiET_vE3vec.cache.)
1306 // CHECK2-NEXT:    [[TMP19:%.*]] = bitcast i8* [[TMP18]] to [2 x i32]*
1307 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP19]], i64 0, i64 0
1308 // CHECK2-NEXT:    store i32 [[TMP17]], i32* [[ARRAYIDX]], align 128
1309 // CHECK2-NEXT:    [[TMP20:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (%struct.S.0* @_ZZ5tmainIiET_vE3var to i8*), i64 4, i8*** @_ZZ5tmainIiET_vE3var.cache.)
1310 // CHECK2-NEXT:    [[TMP21:%.*]] = bitcast i8* [[TMP20]] to %struct.S.0*
1311 // CHECK2-NEXT:    [[TMP22:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr to i8*), i64 8, i8*** @_ZZ5tmainIiET_vE5s_arr.cache.)
1312 // CHECK2-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to [2 x %struct.S.0]*
1313 // CHECK2-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP23]], i64 0, i64 0
1314 // CHECK2-NEXT:    [[CALL4:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYIDX3]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP21]])
1315 // CHECK2-NEXT:    ret void
1316 //
1317 //
1318 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10
1319 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR5]] {
1320 // CHECK2-NEXT:  entry:
1321 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1322 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1323 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1324 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1325 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1326 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1327 // CHECK2-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @_ZZ5tmainIiET_vE5t_var to i8*), i64 4, i8*** @_ZZ5tmainIiET_vE5t_var.cache.)
1328 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
1329 // CHECK2-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
1330 // CHECK2-NEXT:    [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @_ZZ5tmainIiET_vE5t_var to i64), [[TMP4]]
1331 // CHECK2-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
1332 // CHECK2:       copyin.not.master:
1333 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* @_ZZ5tmainIiET_vE5t_var, align 128
1334 // CHECK2-NEXT:    store i32 [[TMP6]], i32* [[TMP3]], align 128
1335 // CHECK2-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
1336 // CHECK2:       copyin.not.master.end:
1337 // CHECK2-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]])
1338 // CHECK2-NEXT:    ret void
1339 //
1340 //
1341 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
1342 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1343 // CHECK2-NEXT:  entry:
1344 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1345 // CHECK2-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1346 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1347 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1348 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1349 // CHECK2-NEXT:    [[TMP1:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* bitcast (i32* @g to i8*), i64 4, i8*** @g.cache.)
1350 // CHECK2-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
1351 // CHECK2-NEXT:    [[TMP3:%.*]] = load volatile i32, i32* [[TMP2]], align 128
1352 // CHECK2-NEXT:    store i32 [[TMP3]], i32* [[F]], align 4
1353 // CHECK2-NEXT:    ret void
1354 //
1355 //
1356 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
1357 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1358 // CHECK2-NEXT:  entry:
1359 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1360 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1361 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1362 // CHECK2-NEXT:    ret void
1363 //
1364 //
1365 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
1366 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1367 // CHECK2-NEXT:  entry:
1368 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1369 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1370 // CHECK2-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1371 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1372 // CHECK2-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1373 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1374 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1375 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
1376 // CHECK2-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* bitcast (i32* @g to i8*), i64 4, i8*** @g.cache.)
1377 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
1378 // CHECK2-NEXT:    [[TMP4:%.*]] = load volatile i32, i32* [[TMP3]], align 128
1379 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP4]]
1380 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[F]], align 4
1381 // CHECK2-NEXT:    ret void
1382 //
1383 //
1384 // CHECK3-LABEL: define {{[^@]+}}@main
1385 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
1386 // CHECK3-NEXT:  entry:
1387 // CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1388 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
1389 // CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1390 // CHECK3-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 1 dereferenceable(1) [[REF_TMP]])
1391 // CHECK3-NEXT:    ret i32 0
1392 //
1393 //
1394 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
1395 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2:[0-9]+]] {
1396 // CHECK3-NEXT:  entry:
1397 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1398 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1399 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 1
1400 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1401 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1402 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1403 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1404 // CHECK3-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i8* bitcast (i32* @g to i8*), i64 4, i8*** @g.cache.)
1405 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
1406 // CHECK3-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
1407 // CHECK3-NEXT:    [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @g to i64), [[TMP4]]
1408 // CHECK3-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
1409 // CHECK3:       copyin.not.master:
1410 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* @g, align 128
1411 // CHECK3-NEXT:    store volatile i32 [[TMP6]], i32* [[TMP3]], align 128
1412 // CHECK3-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
1413 // CHECK3:       copyin.not.master.end:
1414 // CHECK3-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
1415 // CHECK3-NEXT:    [[TMP7:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @g to i8*), i64 4, i8*** @g.cache.)
1416 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1417 // CHECK3-NEXT:    store volatile i32 1, i32* [[TMP8]], align 128
1418 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 1 dereferenceable(1) [[REF_TMP]])
1419 // CHECK3-NEXT:    ret void
1420 //
1421 //
1422 // CHECK4-LABEL: define {{[^@]+}}@main
1423 // CHECK4-SAME: () #[[ATTR1:[0-9]+]] {
1424 // CHECK4-NEXT:  entry:
1425 // CHECK4-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1426 // CHECK4-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1427 // CHECK4-NEXT:    [[TMP0:%.*]] = load i8*, i8** getelementptr inbounds ([[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to %struct.__block_literal_generic*), i32 0, i32 3), align 8
1428 // CHECK4-NEXT:    [[TMP1:%.*]] = bitcast i8* [[TMP0]] to void (i8*)*
1429 // CHECK4-NEXT:    call void [[TMP1]](i8* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to i8*))
1430 // CHECK4-NEXT:    ret i32 0
1431 //
1432 //
1433 // CHECK4-LABEL: define {{[^@]+}}@__main_block_invoke
1434 // CHECK4-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2:[0-9]+]] {
1435 // CHECK4-NEXT:  entry:
1436 // CHECK4-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
1437 // CHECK4-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
1438 // CHECK4-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
1439 // CHECK4-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
1440 // CHECK4-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
1441 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
1442 // CHECK4-NEXT:    ret void
1443 //
1444 //
1445 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
1446 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
1447 // CHECK4-NEXT:  entry:
1448 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1449 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1450 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1451 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1452 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1453 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1454 // CHECK4-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @g to i8*), i64 4, i8*** @g.cache.)
1455 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
1456 // CHECK4-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
1457 // CHECK4-NEXT:    [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @g to i64), [[TMP4]]
1458 // CHECK4-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
1459 // CHECK4:       copyin.not.master:
1460 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* @g, align 128
1461 // CHECK4-NEXT:    store volatile i32 [[TMP6]], i32* [[TMP3]], align 128
1462 // CHECK4-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
1463 // CHECK4:       copyin.not.master.end:
1464 // CHECK4-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
1465 // CHECK4-NEXT:    [[TMP7:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @g to i8*), i64 4, i8*** @g.cache.)
1466 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1467 // CHECK4-NEXT:    store volatile i32 1, i32* [[TMP8]], align 128
1468 // CHECK4-NEXT:    [[TMP9:%.*]] = load i8*, i8** getelementptr inbounds ([[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global.2 to %struct.__block_literal_generic*), i32 0, i32 3), align 8
1469 // CHECK4-NEXT:    [[TMP10:%.*]] = bitcast i8* [[TMP9]] to void (i8*)*
1470 // CHECK4-NEXT:    call void [[TMP10]](i8* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global.2 to i8*))
1471 // CHECK4-NEXT:    ret void
1472 //
1473 //
1474 // CHECK4-LABEL: define {{[^@]+}}@g_block_invoke
1475 // CHECK4-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2]] {
1476 // CHECK4-NEXT:  entry:
1477 // CHECK4-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
1478 // CHECK4-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
1479 // CHECK4-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1480 // CHECK4-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
1481 // CHECK4-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
1482 // CHECK4-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
1483 // CHECK4-NEXT:    [[TMP1:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* bitcast (i32* @g to i8*), i64 4, i8*** @g.cache.)
1484 // CHECK4-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
1485 // CHECK4-NEXT:    store volatile i32 2, i32* [[TMP2]], align 128
1486 // CHECK4-NEXT:    ret void
1487 //
1488 //
1489 // CHECK5-LABEL: define {{[^@]+}}@_Z10array_funcv
1490 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
1491 // CHECK5-NEXT:  entry:
1492 // CHECK5-NEXT:    [[TMP0:%.*]] = load atomic i8, i8* bitcast (i64* @_ZGVZ10array_funcvE1s to i8*) acquire, align 8
1493 // CHECK5-NEXT:    [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
1494 // CHECK5-NEXT:    br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !prof [[PROF2:![0-9]+]]
1495 // CHECK5:       init.check:
1496 // CHECK5-NEXT:    [[TMP1:%.*]] = call i32 @__cxa_guard_acquire(i64* @_ZGVZ10array_funcvE1s) #[[ATTR1:[0-9]+]]
1497 // CHECK5-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
1498 // CHECK5-NEXT:    br i1 [[TOBOOL]], label [[INIT:%.*]], label [[INIT_END]]
1499 // CHECK5:       init:
1500 // CHECK5-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
1501 // CHECK5-NEXT:    call void @__kmpc_threadprivate_register(%struct.ident_t* @[[GLOB1]], i8* bitcast ([2 x %struct.St]* @_ZZ10array_funcvE1s to i8*), i8* (i8*)* @.__kmpc_global_ctor_., i8* (i8*, i8*)* null, void (i8*)* @.__kmpc_global_dtor_.)
1502 // CHECK5-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
1503 // CHECK5:       arrayctor.loop:
1504 // CHECK5-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.St* [ getelementptr inbounds ([2 x %struct.St], [2 x %struct.St]* @_ZZ10array_funcvE1s, i32 0, i32 0), [[INIT]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
1505 // CHECK5-NEXT:    call void @_ZN2StC1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[ARRAYCTOR_CUR]])
1506 // CHECK5-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[ARRAYCTOR_CUR]], i64 1
1507 // CHECK5-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.St* [[ARRAYCTOR_NEXT]], getelementptr inbounds ([[STRUCT_ST]], %struct.St* getelementptr inbounds ([2 x %struct.St], [2 x %struct.St]* @_ZZ10array_funcvE1s, i32 0, i32 0), i64 2)
1508 // CHECK5-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
1509 // CHECK5:       arrayctor.cont:
1510 // CHECK5-NEXT:    [[TMP3:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR1]]
1511 // CHECK5-NEXT:    call void @__cxa_guard_release(i64* @_ZGVZ10array_funcvE1s) #[[ATTR1]]
1512 // CHECK5-NEXT:    br label [[INIT_END]]
1513 // CHECK5:       init.end:
1514 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
1515 // CHECK5-NEXT:    ret void
1516 //
1517 //
1518 // CHECK5-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_.
1519 // CHECK5-SAME: (i8* [[TMP0:%.*]]) #[[ATTR2:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" {
1520 // CHECK5-NEXT:  entry:
1521 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1522 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1523 // CHECK5-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1524 // CHECK5-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to [2 x %struct.St]*
1525 // CHECK5-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.St], [2 x %struct.St]* [[TMP2]], i32 0, i32 0
1526 // CHECK5-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[ARRAY_BEGIN]], i64 2
1527 // CHECK5-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
1528 // CHECK5:       arrayctor.loop:
1529 // CHECK5-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.St* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
1530 // CHECK5-NEXT:    call void @_ZN2StC1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[ARRAYCTOR_CUR]])
1531 // CHECK5-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[ARRAYCTOR_CUR]], i64 1
1532 // CHECK5-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.St* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
1533 // CHECK5-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
1534 // CHECK5:       arrayctor.cont:
1535 // CHECK5-NEXT:    [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1536 // CHECK5-NEXT:    ret i8* [[TMP3]]
1537 //
1538 //
1539 // CHECK5-LABEL: define {{[^@]+}}@_ZN2StC1Ev
1540 // CHECK5-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3:[0-9]+]] align 2 {
1541 // CHECK5-NEXT:  entry:
1542 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
1543 // CHECK5-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
1544 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
1545 // CHECK5-NEXT:    call void @_ZN2StC2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]])
1546 // CHECK5-NEXT:    ret void
1547 //
1548 //
1549 // CHECK5-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_.
1550 // CHECK5-SAME: (i8* [[TMP0:%.*]]) #[[ATTR2]] section "__TEXT,__StaticInit,regular,pure_instructions" {
1551 // CHECK5-NEXT:  entry:
1552 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1553 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1554 // CHECK5-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1555 // CHECK5-NEXT:    [[ARRAY_BEGIN:%.*]] = bitcast i8* [[TMP1]] to %struct.St*
1556 // CHECK5-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[ARRAY_BEGIN]], i64 2
1557 // CHECK5-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1558 // CHECK5:       arraydestroy.body:
1559 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.St* [ [[TMP2]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1560 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1561 // CHECK5-NEXT:    call void @_ZN2StD1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR1]]
1562 // CHECK5-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.St* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1563 // CHECK5-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1564 // CHECK5:       arraydestroy.done1:
1565 // CHECK5-NEXT:    ret void
1566 //
1567 //
1568 // CHECK5-LABEL: define {{[^@]+}}@_ZN2StD1Ev
1569 // CHECK5-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 {
1570 // CHECK5-NEXT:  entry:
1571 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
1572 // CHECK5-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
1573 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
1574 // CHECK5-NEXT:    call void @_ZN2StD2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR1]]
1575 // CHECK5-NEXT:    ret void
1576 //
1577 //
1578 // CHECK5-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
1579 // CHECK5-SAME: (i8* [[TMP0:%.*]]) #[[ATTR2]] section "__TEXT,__StaticInit,regular,pure_instructions" {
1580 // CHECK5-NEXT:  entry:
1581 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1582 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1583 // CHECK5-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1584 // CHECK5:       arraydestroy.body:
1585 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.St* [ getelementptr inbounds ([[STRUCT_ST:%.*]], %struct.St* getelementptr inbounds ([2 x %struct.St], [2 x %struct.St]* @_ZZ10array_funcvE1s, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1586 // CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1587 // CHECK5-NEXT:    call void @_ZN2StD1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR1]]
1588 // CHECK5-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.St* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.St], [2 x %struct.St]* @_ZZ10array_funcvE1s, i32 0, i32 0)
1589 // CHECK5-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1590 // CHECK5:       arraydestroy.done1:
1591 // CHECK5-NEXT:    ret void
1592 //
1593 //
1594 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
1595 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR4:[0-9]+]] {
1596 // CHECK5-NEXT:  entry:
1597 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1598 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1599 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1600 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1601 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1602 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1603 // CHECK5-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x i32]* @_ZZ10array_funcvE1a to i8*), i64 8, i8*** @_ZZ10array_funcvE1a.cache.)
1604 // CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i32]*
1605 // CHECK5-NEXT:    [[TMP4:%.*]] = ptrtoint [2 x i32]* [[TMP3]] to i64
1606 // CHECK5-NEXT:    [[TMP5:%.*]] = icmp ne i64 ptrtoint ([2 x i32]* @_ZZ10array_funcvE1a to i64), [[TMP4]]
1607 // CHECK5-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
1608 // CHECK5:       copyin.not.master:
1609 // CHECK5-NEXT:    [[TMP6:%.*]] = bitcast [2 x i32]* [[TMP3]] to i8*
1610 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP6]], i8* align 4 bitcast ([2 x i32]* @_ZZ10array_funcvE1a to i8*), i64 8, i1 false)
1611 // CHECK5-NEXT:    [[TMP7:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast ([2 x %struct.St]* @_ZZ10array_funcvE1s to i8*), i64 16, i8*** @_ZZ10array_funcvE1s.cache.)
1612 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to [2 x %struct.St]*
1613 // CHECK5-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.St], [2 x %struct.St]* [[TMP8]], i32 0, i32 0
1614 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr [[STRUCT_ST:%.*]], %struct.St* [[ARRAY_BEGIN]], i64 2
1615 // CHECK5-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.St* [[ARRAY_BEGIN]], [[TMP9]]
1616 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
1617 // CHECK5:       omp.arraycpy.body:
1618 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.St* [ getelementptr inbounds ([2 x %struct.St], [2 x %struct.St]* @_ZZ10array_funcvE1s, i32 0, i32 0), [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1619 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.St* [ [[ARRAY_BEGIN]], [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1620 // CHECK5-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.St* @_ZN2StaSERKS_(%struct.St* nonnull align 4 dereferenceable(8) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.St* nonnull align 4 dereferenceable(8) [[OMP_ARRAYCPY_SRCELEMENTPAST]])
1621 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_ST]], %struct.St* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
1622 // CHECK5-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_ST]], %struct.St* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
1623 // CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.St* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP9]]
1624 // CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
1625 // CHECK5:       omp.arraycpy.done1:
1626 // CHECK5-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
1627 // CHECK5:       copyin.not.master.end:
1628 // CHECK5-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
1629 // CHECK5-NEXT:    ret void
1630 //
1631 //
1632 // CHECK5-LABEL: define {{[^@]+}}@_ZN2StaSERKS_
1633 // CHECK5-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]], %struct.St* nonnull align 4 dereferenceable(8) [[TMP0:%.*]]) #[[ATTR0]] align 2 {
1634 // CHECK5-NEXT:  entry:
1635 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
1636 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca %struct.St*, align 8
1637 // CHECK5-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
1638 // CHECK5-NEXT:    store %struct.St* [[TMP0]], %struct.St** [[DOTADDR]], align 8
1639 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
1640 // CHECK5-NEXT:    ret %struct.St* [[THIS1]]
1641 //
1642 //
1643 // CHECK5-LABEL: define {{[^@]+}}@_ZN2StC2Ev
1644 // CHECK5-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 {
1645 // CHECK5-NEXT:  entry:
1646 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
1647 // CHECK5-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
1648 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
1649 // CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[THIS1]], i32 0, i32 0
1650 // CHECK5-NEXT:    store i32 0, i32* [[A]], align 4
1651 // CHECK5-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[THIS1]], i32 0, i32 1
1652 // CHECK5-NEXT:    store i32 0, i32* [[B]], align 4
1653 // CHECK5-NEXT:    ret void
1654 //
1655 //
1656 // CHECK5-LABEL: define {{[^@]+}}@_ZN2StD2Ev
1657 // CHECK5-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 {
1658 // CHECK5-NEXT:  entry:
1659 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
1660 // CHECK5-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
1661 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
1662 // CHECK5-NEXT:    ret void
1663 //
1664 //
1665 // CHECK11-LABEL: define {{[^@]+}}@main
1666 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
1667 // CHECK11-NEXT:  entry:
1668 // CHECK11-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1669 // CHECK11-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
1670 // CHECK11-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S]], align 4
1671 // CHECK11-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1672 // CHECK11-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
1673 // CHECK11-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]])
1674 // CHECK11-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]], %struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]])
1675 // CHECK11-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4:[0-9]+]]
1676 // CHECK11-NEXT:    [[TMP0:%.*]] = load i8, i8* @_ZGVZ4mainE5s_arr, align 1
1677 // CHECK11-NEXT:    [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
1678 // CHECK11-NEXT:    br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !prof [[PROF2:![0-9]+]]
1679 // CHECK11:       init.check:
1680 // CHECK11-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i64 0, i64 0), float 1.000000e+00)
1681 // CHECK11-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i64 0, i64 1), float 2.000000e+00)
1682 // CHECK11-NEXT:    [[TMP1:%.*]] = call i32 @__cxa_thread_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR4]]
1683 // CHECK11-NEXT:    store i8 1, i8* @_ZGVZ4mainE5s_arr, align 1
1684 // CHECK11-NEXT:    br label [[INIT_END]]
1685 // CHECK11:       init.end:
1686 // CHECK11-NEXT:    [[TMP2:%.*]] = load i8, i8* @_ZGVZ4mainE3var, align 1
1687 // CHECK11-NEXT:    [[GUARD_UNINITIALIZED1:%.*]] = icmp eq i8 [[TMP2]], 0
1688 // CHECK11-NEXT:    br i1 [[GUARD_UNINITIALIZED1]], label [[INIT_CHECK2:%.*]], label [[INIT_END3:%.*]], !prof [[PROF2]]
1689 // CHECK11:       init.check2:
1690 // CHECK11-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) @_ZZ4mainE3var, float 3.000000e+00)
1691 // CHECK11-NEXT:    [[TMP3:%.*]] = call i32 @__cxa_thread_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @_ZZ4mainE3var to i8*), i8* @__dso_handle) #[[ATTR4]]
1692 // CHECK11-NEXT:    store i8 1, i8* @_ZGVZ4mainE3var, align 1
1693 // CHECK11-NEXT:    br label [[INIT_END3]]
1694 // CHECK11:       init.end3:
1695 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, [2 x i32]*, [2 x %struct.S]*, %struct.S*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* @_ZZ4mainE5t_var, [2 x i32]* @_ZZ4mainE3vec, [2 x %struct.S]* @_ZZ4mainE5s_arr, %struct.S* @_ZZ4mainE3var)
1696 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* @_ZZ4mainE5t_var)
1697 // CHECK11-NEXT:    [[CALL4:%.*]] = call i32 @_Z5tmainIiET_v()
1698 // CHECK11-NEXT:    store i32 [[CALL4]], i32* [[RETVAL]], align 4
1699 // CHECK11-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
1700 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[RETVAL]], align 4
1701 // CHECK11-NEXT:    ret i32 [[TMP4]]
1702 //
1703 //
1704 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
1705 // CHECK11-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
1706 // CHECK11-NEXT:  entry:
1707 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1708 // CHECK11-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1709 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1710 // CHECK11-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
1711 // CHECK11-NEXT:    ret void
1712 //
1713 //
1714 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIfEaSERKS0_
1715 // CHECK11-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR2:[0-9]+]] comdat align 2 {
1716 // CHECK11-NEXT:  entry:
1717 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1718 // CHECK11-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S*, align 8
1719 // CHECK11-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1720 // CHECK11-NEXT:    store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
1721 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1722 // CHECK11-NEXT:    ret %struct.S* [[THIS1]]
1723 //
1724 //
1725 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
1726 // CHECK11-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1727 // CHECK11-NEXT:  entry:
1728 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1729 // CHECK11-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1730 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1731 // CHECK11-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
1732 // CHECK11-NEXT:    ret void
1733 //
1734 //
1735 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
1736 // CHECK11-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1737 // CHECK11-NEXT:  entry:
1738 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1739 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
1740 // CHECK11-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1741 // CHECK11-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
1742 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1743 // CHECK11-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1744 // CHECK11-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
1745 // CHECK11-NEXT:    ret void
1746 //
1747 //
1748 // CHECK11-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
1749 // CHECK11-SAME: (i8* [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] section ".text.startup" {
1750 // CHECK11-NEXT:  entry:
1751 // CHECK11-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1752 // CHECK11-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1753 // CHECK11-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1754 // CHECK11:       arraydestroy.body:
1755 // CHECK11-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1756 // CHECK11-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1757 // CHECK11-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1758 // CHECK11-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0)
1759 // CHECK11-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1760 // CHECK11:       arraydestroy.done1:
1761 // CHECK11-NEXT:    ret void
1762 //
1763 //
1764 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined.
1765 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR5:[0-9]+]] {
1766 // CHECK11-NEXT:  entry:
1767 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1768 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1769 // CHECK11-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
1770 // CHECK11-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
1771 // CHECK11-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
1772 // CHECK11-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
1773 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1774 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1775 // CHECK11-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
1776 // CHECK11-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
1777 // CHECK11-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
1778 // CHECK11-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
1779 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
1780 // CHECK11-NEXT:    [[TMP1:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
1781 // CHECK11-NEXT:    [[TMP2:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
1782 // CHECK11-NEXT:    [[TMP3:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
1783 // CHECK11-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP0]] to i64
1784 // CHECK11-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], ptrtoint (i32* @_ZZ4mainE5t_var to i64)
1785 // CHECK11-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
1786 // CHECK11:       copyin.not.master:
1787 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP0]], align 4
1788 // CHECK11-NEXT:    store i32 [[TMP6]], i32* @_ZZ4mainE5t_var, align 4
1789 // CHECK11-NEXT:    [[TMP7:%.*]] = bitcast [2 x i32]* [[TMP1]] to i8*
1790 // CHECK11-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 bitcast ([2 x i32]* @_ZZ4mainE3vec to i8*), i8* align 4 [[TMP7]], i64 8, i1 false)
1791 // CHECK11-NEXT:    [[TMP8:%.*]] = bitcast [2 x %struct.S]* [[TMP2]] to %struct.S*
1792 // CHECK11-NEXT:    br i1 icmp eq (%struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0), %struct.S* getelementptr ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0), i64 2)), label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
1793 // CHECK11:       omp.arraycpy.body:
1794 // CHECK11-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP8]], [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1795 // CHECK11-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0), [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1796 // CHECK11-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]])
1797 // CHECK11-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
1798 // CHECK11-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
1799 // CHECK11-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], getelementptr ([[STRUCT_S]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0), i64 2)
1800 // CHECK11-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
1801 // CHECK11:       omp.arraycpy.done1:
1802 // CHECK11-NEXT:    [[CALL2:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) @_ZZ4mainE3var, %struct.S* nonnull align 4 dereferenceable(4) [[TMP3]])
1803 // CHECK11-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
1804 // CHECK11:       copyin.not.master.end:
1805 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1806 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
1807 // CHECK11-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP10]])
1808 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* @_ZZ4mainE5t_var, align 4
1809 // CHECK11-NEXT:    store i32 [[TMP11]], i32* getelementptr inbounds ([2 x i32], [2 x i32]* @_ZZ4mainE3vec, i64 0, i64 0), align 4
1810 // CHECK11-NEXT:    [[CALL3:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i64 0, i64 0), %struct.S* nonnull align 4 dereferenceable(4) @_ZZ4mainE3var)
1811 // CHECK11-NEXT:    ret void
1812 //
1813 //
1814 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1
1815 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR5]] {
1816 // CHECK11-NEXT:  entry:
1817 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1818 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1819 // CHECK11-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
1820 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1821 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1822 // CHECK11-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
1823 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
1824 // CHECK11-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[TMP0]] to i64
1825 // CHECK11-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[TMP1]], ptrtoint (i32* @_ZZ4mainE5t_var to i64)
1826 // CHECK11-NEXT:    br i1 [[TMP2]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
1827 // CHECK11:       copyin.not.master:
1828 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
1829 // CHECK11-NEXT:    store i32 [[TMP3]], i32* @_ZZ4mainE5t_var, align 4
1830 // CHECK11-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
1831 // CHECK11:       copyin.not.master.end:
1832 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1833 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
1834 // CHECK11-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
1835 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* @_ZZ4mainE5t_var, align 4
1836 // CHECK11-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP6]], 1
1837 // CHECK11-NEXT:    store i32 [[INC]], i32* @_ZZ4mainE5t_var, align 4
1838 // CHECK11-NEXT:    ret void
1839 //
1840 //
1841 // CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
1842 // CHECK11-SAME: () #[[ATTR2]] comdat {
1843 // CHECK11-NEXT:  entry:
1844 // CHECK11-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1845 // CHECK11-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S_0]], align 4
1846 // CHECK11-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
1847 // CHECK11-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]])
1848 // CHECK11-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]], %struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]])
1849 // CHECK11-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
1850 // CHECK11-NEXT:    [[TMP0:%.*]] = load i8, i8* bitcast (i64* @_ZGVZ5tmainIiET_vE5s_arr to i8*), align 8
1851 // CHECK11-NEXT:    [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
1852 // CHECK11-NEXT:    br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !prof [[PROF2]]
1853 // CHECK11:       init.check:
1854 // CHECK11-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i64 0, i64 0), i32 1)
1855 // CHECK11-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i64 0, i64 1), i32 2)
1856 // CHECK11-NEXT:    [[TMP1:%.*]] = call i32 @__cxa_thread_atexit(void (i8*)* @__cxx_global_array_dtor.2, i8* null, i8* @__dso_handle) #[[ATTR4]]
1857 // CHECK11-NEXT:    store i8 1, i8* bitcast (i64* @_ZGVZ5tmainIiET_vE5s_arr to i8*), align 8
1858 // CHECK11-NEXT:    br label [[INIT_END]]
1859 // CHECK11:       init.end:
1860 // CHECK11-NEXT:    [[TMP2:%.*]] = load i8, i8* bitcast (i64* @_ZGVZ5tmainIiET_vE3var to i8*), align 8
1861 // CHECK11-NEXT:    [[GUARD_UNINITIALIZED1:%.*]] = icmp eq i8 [[TMP2]], 0
1862 // CHECK11-NEXT:    br i1 [[GUARD_UNINITIALIZED1]], label [[INIT_CHECK2:%.*]], label [[INIT_END3:%.*]], !prof [[PROF2]]
1863 // CHECK11:       init.check2:
1864 // CHECK11-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) @_ZZ5tmainIiET_vE3var, i32 3)
1865 // CHECK11-NEXT:    [[TMP3:%.*]] = call i32 @__cxa_thread_atexit(void (i8*)* bitcast (void (%struct.S.0*)* @_ZN1SIiED1Ev to void (i8*)*), i8* bitcast (%struct.S.0* @_ZZ5tmainIiET_vE3var to i8*), i8* @__dso_handle) #[[ATTR4]]
1866 // CHECK11-NEXT:    store i8 1, i8* bitcast (i64* @_ZGVZ5tmainIiET_vE3var to i8*), align 8
1867 // CHECK11-NEXT:    br label [[INIT_END3]]
1868 // CHECK11:       init.end3:
1869 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, [2 x i32]*, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* @_ZZ5tmainIiET_vE5t_var, [2 x i32]* @_ZZ5tmainIiET_vE3vec, [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, %struct.S.0* @_ZZ5tmainIiET_vE3var)
1870 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* @_ZZ5tmainIiET_vE5t_var)
1871 // CHECK11-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
1872 // CHECK11-NEXT:    ret i32 0
1873 //
1874 //
1875 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
1876 // CHECK11-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1877 // CHECK11-NEXT:  entry:
1878 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1879 // CHECK11-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1880 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1881 // CHECK11-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1882 // CHECK11-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 128
1883 // CHECK11-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
1884 // CHECK11-NEXT:    store float [[CONV]], float* [[F]], align 4
1885 // CHECK11-NEXT:    ret void
1886 //
1887 //
1888 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
1889 // CHECK11-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1890 // CHECK11-NEXT:  entry:
1891 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1892 // CHECK11-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1893 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1894 // CHECK11-NEXT:    ret void
1895 //
1896 //
1897 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
1898 // CHECK11-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1899 // CHECK11-NEXT:  entry:
1900 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1901 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
1902 // CHECK11-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1903 // CHECK11-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
1904 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1905 // CHECK11-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1906 // CHECK11-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1907 // CHECK11-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 128
1908 // CHECK11-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
1909 // CHECK11-NEXT:    [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
1910 // CHECK11-NEXT:    store float [[ADD]], float* [[F]], align 4
1911 // CHECK11-NEXT:    ret void
1912 //
1913 //
1914 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
1915 // CHECK11-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1916 // CHECK11-NEXT:  entry:
1917 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1918 // CHECK11-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1919 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1920 // CHECK11-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
1921 // CHECK11-NEXT:    ret void
1922 //
1923 //
1924 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIiEaSERKS0_
1925 // CHECK11-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR2]] comdat align 2 {
1926 // CHECK11-NEXT:  entry:
1927 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1928 // CHECK11-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S.0*, align 8
1929 // CHECK11-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1930 // CHECK11-NEXT:    store %struct.S.0* [[TMP0]], %struct.S.0** [[DOTADDR]], align 8
1931 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1932 // CHECK11-NEXT:    ret %struct.S.0* [[THIS1]]
1933 //
1934 //
1935 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
1936 // CHECK11-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1937 // CHECK11-NEXT:  entry:
1938 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1939 // CHECK11-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1940 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1941 // CHECK11-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
1942 // CHECK11-NEXT:    ret void
1943 //
1944 //
1945 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
1946 // CHECK11-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1947 // CHECK11-NEXT:  entry:
1948 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1949 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1950 // CHECK11-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1951 // CHECK11-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1952 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1953 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1954 // CHECK11-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
1955 // CHECK11-NEXT:    ret void
1956 //
1957 //
1958 // CHECK11-LABEL: define {{[^@]+}}@__cxx_global_array_dtor.2
1959 // CHECK11-SAME: (i8* [[TMP0:%.*]]) #[[ATTR3]] section ".text.startup" {
1960 // CHECK11-NEXT:  entry:
1961 // CHECK11-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1962 // CHECK11-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1963 // CHECK11-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1964 // CHECK11:       arraydestroy.body:
1965 // CHECK11-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ getelementptr inbounds ([[STRUCT_S_0:%.*]], %struct.S.0* getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1966 // CHECK11-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1967 // CHECK11-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1968 // CHECK11-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0)
1969 // CHECK11-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1970 // CHECK11:       arraydestroy.done1:
1971 // CHECK11-NEXT:    ret void
1972 //
1973 //
1974 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3
1975 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR5]] {
1976 // CHECK11-NEXT:  entry:
1977 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1978 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1979 // CHECK11-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
1980 // CHECK11-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
1981 // CHECK11-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
1982 // CHECK11-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
1983 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1984 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1985 // CHECK11-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
1986 // CHECK11-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
1987 // CHECK11-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
1988 // CHECK11-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
1989 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
1990 // CHECK11-NEXT:    [[TMP1:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
1991 // CHECK11-NEXT:    [[TMP2:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
1992 // CHECK11-NEXT:    [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
1993 // CHECK11-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP0]] to i64
1994 // CHECK11-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], ptrtoint (i32* @_ZZ5tmainIiET_vE5t_var to i64)
1995 // CHECK11-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
1996 // CHECK11:       copyin.not.master:
1997 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP0]], align 128
1998 // CHECK11-NEXT:    store i32 [[TMP6]], i32* @_ZZ5tmainIiET_vE5t_var, align 128
1999 // CHECK11-NEXT:    [[TMP7:%.*]] = bitcast [2 x i32]* [[TMP1]] to i8*
2000 // CHECK11-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 bitcast ([2 x i32]* @_ZZ5tmainIiET_vE3vec to i8*), i8* align 128 [[TMP7]], i64 8, i1 false)
2001 // CHECK11-NEXT:    [[TMP8:%.*]] = bitcast [2 x %struct.S.0]* [[TMP2]] to %struct.S.0*
2002 // CHECK11-NEXT:    br i1 icmp eq (%struct.S.0* getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0), %struct.S.0* getelementptr ([[STRUCT_S_0:%.*]], %struct.S.0* getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0), i64 2)), label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
2003 // CHECK11:       omp.arraycpy.body:
2004 // CHECK11-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP8]], [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2005 // CHECK11-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0), [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2006 // CHECK11-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]])
2007 // CHECK11-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
2008 // CHECK11-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
2009 // CHECK11-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], getelementptr ([[STRUCT_S_0]], %struct.S.0* getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0), i64 2)
2010 // CHECK11-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
2011 // CHECK11:       omp.arraycpy.done1:
2012 // CHECK11-NEXT:    [[CALL2:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) @_ZZ5tmainIiET_vE3var, %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP3]])
2013 // CHECK11-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
2014 // CHECK11:       copyin.not.master.end:
2015 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2016 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
2017 // CHECK11-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
2018 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* @_ZZ5tmainIiET_vE5t_var, align 128
2019 // CHECK11-NEXT:    store i32 [[TMP11]], i32* getelementptr inbounds ([2 x i32], [2 x i32]* @_ZZ5tmainIiET_vE3vec, i64 0, i64 0), align 128
2020 // CHECK11-NEXT:    [[CALL3:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i64 0, i64 0), %struct.S.0* nonnull align 4 dereferenceable(4) @_ZZ5tmainIiET_vE3var)
2021 // CHECK11-NEXT:    ret void
2022 //
2023 //
2024 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..4
2025 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR5]] {
2026 // CHECK11-NEXT:  entry:
2027 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2028 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2029 // CHECK11-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
2030 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2031 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2032 // CHECK11-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
2033 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
2034 // CHECK11-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[TMP0]] to i64
2035 // CHECK11-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[TMP1]], ptrtoint (i32* @_ZZ5tmainIiET_vE5t_var to i64)
2036 // CHECK11-NEXT:    br i1 [[TMP2]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
2037 // CHECK11:       copyin.not.master:
2038 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 128
2039 // CHECK11-NEXT:    store i32 [[TMP3]], i32* @_ZZ5tmainIiET_vE5t_var, align 128
2040 // CHECK11-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
2041 // CHECK11:       copyin.not.master.end:
2042 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2043 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
2044 // CHECK11-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
2045 // CHECK11-NEXT:    ret void
2046 //
2047 //
2048 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
2049 // CHECK11-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2050 // CHECK11-NEXT:  entry:
2051 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2052 // CHECK11-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2053 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2054 // CHECK11-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
2055 // CHECK11-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 128
2056 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
2057 // CHECK11-NEXT:    ret void
2058 //
2059 //
2060 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
2061 // CHECK11-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2062 // CHECK11-NEXT:  entry:
2063 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2064 // CHECK11-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2065 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2066 // CHECK11-NEXT:    ret void
2067 //
2068 //
2069 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
2070 // CHECK11-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2071 // CHECK11-NEXT:  entry:
2072 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2073 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
2074 // CHECK11-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2075 // CHECK11-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
2076 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2077 // CHECK11-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
2078 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
2079 // CHECK11-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 128
2080 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]]
2081 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[F]], align 4
2082 // CHECK11-NEXT:    ret void
2083 //
2084 //
2085 // CHECK11-LABEL: define {{[^@]+}}@_ZTW1g
2086 // CHECK11-SAME: () #[[ATTR8:[0-9]+]] comdat {
2087 // CHECK11-NEXT:    ret i32* @g
2088 //
2089 //
2090 // CHECK12-LABEL: define {{[^@]+}}@main
2091 // CHECK12-SAME: () #[[ATTR0:[0-9]+]] {
2092 // CHECK12-NEXT:  entry:
2093 // CHECK12-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2094 // CHECK12-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
2095 // CHECK12-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S]], align 4
2096 // CHECK12-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2097 // CHECK12-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
2098 // CHECK12-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]])
2099 // CHECK12-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]], %struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]])
2100 // CHECK12-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4:[0-9]+]]
2101 // CHECK12-NEXT:    [[TMP0:%.*]] = load i8, i8* @_ZGVZ4mainE5s_arr, align 1
2102 // CHECK12-NEXT:    [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
2103 // CHECK12-NEXT:    br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !prof [[PROF2:![0-9]+]]
2104 // CHECK12:       init.check:
2105 // CHECK12-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i64 0, i64 0), float 1.000000e+00)
2106 // CHECK12-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i64 0, i64 1), float 2.000000e+00)
2107 // CHECK12-NEXT:    [[TMP1:%.*]] = call i32 @__cxa_thread_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR4]]
2108 // CHECK12-NEXT:    store i8 1, i8* @_ZGVZ4mainE5s_arr, align 1
2109 // CHECK12-NEXT:    br label [[INIT_END]]
2110 // CHECK12:       init.end:
2111 // CHECK12-NEXT:    [[TMP2:%.*]] = load i8, i8* @_ZGVZ4mainE3var, align 1
2112 // CHECK12-NEXT:    [[GUARD_UNINITIALIZED1:%.*]] = icmp eq i8 [[TMP2]], 0
2113 // CHECK12-NEXT:    br i1 [[GUARD_UNINITIALIZED1]], label [[INIT_CHECK2:%.*]], label [[INIT_END3:%.*]], !prof [[PROF2]]
2114 // CHECK12:       init.check2:
2115 // CHECK12-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) @_ZZ4mainE3var, float 3.000000e+00)
2116 // CHECK12-NEXT:    [[TMP3:%.*]] = call i32 @__cxa_thread_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @_ZZ4mainE3var to i8*), i8* @__dso_handle) #[[ATTR4]]
2117 // CHECK12-NEXT:    store i8 1, i8* @_ZGVZ4mainE3var, align 1
2118 // CHECK12-NEXT:    br label [[INIT_END3]]
2119 // CHECK12:       init.end3:
2120 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, [2 x i32]*, [2 x %struct.S]*, %struct.S*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* @_ZZ4mainE5t_var, [2 x i32]* @_ZZ4mainE3vec, [2 x %struct.S]* @_ZZ4mainE5s_arr, %struct.S* @_ZZ4mainE3var)
2121 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* @_ZZ4mainE5t_var)
2122 // CHECK12-NEXT:    [[CALL4:%.*]] = call i32 @_Z5tmainIiET_v()
2123 // CHECK12-NEXT:    store i32 [[CALL4]], i32* [[RETVAL]], align 4
2124 // CHECK12-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
2125 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[RETVAL]], align 4
2126 // CHECK12-NEXT:    ret i32 [[TMP4]]
2127 //
2128 //
2129 // CHECK12-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
2130 // CHECK12-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
2131 // CHECK12-NEXT:  entry:
2132 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2133 // CHECK12-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2134 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2135 // CHECK12-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
2136 // CHECK12-NEXT:    ret void
2137 //
2138 //
2139 // CHECK12-LABEL: define {{[^@]+}}@_ZN1SIfEaSERKS0_
2140 // CHECK12-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR2:[0-9]+]] comdat align 2 {
2141 // CHECK12-NEXT:  entry:
2142 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2143 // CHECK12-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S*, align 8
2144 // CHECK12-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2145 // CHECK12-NEXT:    store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
2146 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2147 // CHECK12-NEXT:    ret %struct.S* [[THIS1]]
2148 //
2149 //
2150 // CHECK12-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
2151 // CHECK12-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2152 // CHECK12-NEXT:  entry:
2153 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2154 // CHECK12-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2155 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2156 // CHECK12-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
2157 // CHECK12-NEXT:    ret void
2158 //
2159 //
2160 // CHECK12-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
2161 // CHECK12-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2162 // CHECK12-NEXT:  entry:
2163 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2164 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
2165 // CHECK12-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2166 // CHECK12-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
2167 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2168 // CHECK12-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
2169 // CHECK12-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
2170 // CHECK12-NEXT:    ret void
2171 //
2172 //
2173 // CHECK12-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
2174 // CHECK12-SAME: (i8* [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] section ".text.startup" {
2175 // CHECK12-NEXT:  entry:
2176 // CHECK12-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
2177 // CHECK12-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
2178 // CHECK12-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
2179 // CHECK12:       arraydestroy.body:
2180 // CHECK12-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
2181 // CHECK12-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
2182 // CHECK12-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
2183 // CHECK12-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0)
2184 // CHECK12-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
2185 // CHECK12:       arraydestroy.done1:
2186 // CHECK12-NEXT:    ret void
2187 //
2188 //
2189 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined.
2190 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR5:[0-9]+]] {
2191 // CHECK12-NEXT:  entry:
2192 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2193 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2194 // CHECK12-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
2195 // CHECK12-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
2196 // CHECK12-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
2197 // CHECK12-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
2198 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2199 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2200 // CHECK12-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
2201 // CHECK12-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
2202 // CHECK12-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
2203 // CHECK12-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
2204 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
2205 // CHECK12-NEXT:    [[TMP1:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
2206 // CHECK12-NEXT:    [[TMP2:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
2207 // CHECK12-NEXT:    [[TMP3:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
2208 // CHECK12-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP0]] to i64
2209 // CHECK12-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], ptrtoint (i32* @_ZZ4mainE5t_var to i64)
2210 // CHECK12-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
2211 // CHECK12:       copyin.not.master:
2212 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP0]], align 4
2213 // CHECK12-NEXT:    store i32 [[TMP6]], i32* @_ZZ4mainE5t_var, align 4
2214 // CHECK12-NEXT:    [[TMP7:%.*]] = bitcast [2 x i32]* [[TMP1]] to i8*
2215 // CHECK12-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 bitcast ([2 x i32]* @_ZZ4mainE3vec to i8*), i8* align 4 [[TMP7]], i64 8, i1 false)
2216 // CHECK12-NEXT:    [[TMP8:%.*]] = bitcast [2 x %struct.S]* [[TMP2]] to %struct.S*
2217 // CHECK12-NEXT:    br i1 icmp eq (%struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0), %struct.S* getelementptr ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0), i64 2)), label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
2218 // CHECK12:       omp.arraycpy.body:
2219 // CHECK12-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP8]], [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2220 // CHECK12-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0), [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2221 // CHECK12-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]])
2222 // CHECK12-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
2223 // CHECK12-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
2224 // CHECK12-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], getelementptr ([[STRUCT_S]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i32 0, i32 0), i64 2)
2225 // CHECK12-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
2226 // CHECK12:       omp.arraycpy.done1:
2227 // CHECK12-NEXT:    [[CALL2:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) @_ZZ4mainE3var, %struct.S* nonnull align 4 dereferenceable(4) [[TMP3]])
2228 // CHECK12-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
2229 // CHECK12:       copyin.not.master.end:
2230 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2231 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
2232 // CHECK12-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP10]])
2233 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* @_ZZ4mainE5t_var, align 4
2234 // CHECK12-NEXT:    store i32 [[TMP11]], i32* getelementptr inbounds ([2 x i32], [2 x i32]* @_ZZ4mainE3vec, i64 0, i64 0), align 4
2235 // CHECK12-NEXT:    [[CALL3:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEaSERKS0_(%struct.S* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @_ZZ4mainE5s_arr, i64 0, i64 0), %struct.S* nonnull align 4 dereferenceable(4) @_ZZ4mainE3var)
2236 // CHECK12-NEXT:    ret void
2237 //
2238 //
2239 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..1
2240 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR5]] {
2241 // CHECK12-NEXT:  entry:
2242 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2243 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2244 // CHECK12-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
2245 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2246 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2247 // CHECK12-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
2248 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
2249 // CHECK12-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[TMP0]] to i64
2250 // CHECK12-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[TMP1]], ptrtoint (i32* @_ZZ4mainE5t_var to i64)
2251 // CHECK12-NEXT:    br i1 [[TMP2]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
2252 // CHECK12:       copyin.not.master:
2253 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
2254 // CHECK12-NEXT:    store i32 [[TMP3]], i32* @_ZZ4mainE5t_var, align 4
2255 // CHECK12-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
2256 // CHECK12:       copyin.not.master.end:
2257 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2258 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
2259 // CHECK12-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
2260 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* @_ZZ4mainE5t_var, align 4
2261 // CHECK12-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP6]], 1
2262 // CHECK12-NEXT:    store i32 [[INC]], i32* @_ZZ4mainE5t_var, align 4
2263 // CHECK12-NEXT:    ret void
2264 //
2265 //
2266 // CHECK12-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
2267 // CHECK12-SAME: () #[[ATTR2]] comdat {
2268 // CHECK12-NEXT:  entry:
2269 // CHECK12-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
2270 // CHECK12-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S_0]], align 4
2271 // CHECK12-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
2272 // CHECK12-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]])
2273 // CHECK12-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]], %struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]])
2274 // CHECK12-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
2275 // CHECK12-NEXT:    [[TMP0:%.*]] = load i8, i8* bitcast (i64* @_ZGVZ5tmainIiET_vE5s_arr to i8*), align 8
2276 // CHECK12-NEXT:    [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
2277 // CHECK12-NEXT:    br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !prof [[PROF2]]
2278 // CHECK12:       init.check:
2279 // CHECK12-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i64 0, i64 0), i32 1)
2280 // CHECK12-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i64 0, i64 1), i32 2)
2281 // CHECK12-NEXT:    [[TMP1:%.*]] = call i32 @__cxa_thread_atexit(void (i8*)* @__cxx_global_array_dtor.2, i8* null, i8* @__dso_handle) #[[ATTR4]]
2282 // CHECK12-NEXT:    store i8 1, i8* bitcast (i64* @_ZGVZ5tmainIiET_vE5s_arr to i8*), align 8
2283 // CHECK12-NEXT:    br label [[INIT_END]]
2284 // CHECK12:       init.end:
2285 // CHECK12-NEXT:    [[TMP2:%.*]] = load i8, i8* bitcast (i64* @_ZGVZ5tmainIiET_vE3var to i8*), align 8
2286 // CHECK12-NEXT:    [[GUARD_UNINITIALIZED1:%.*]] = icmp eq i8 [[TMP2]], 0
2287 // CHECK12-NEXT:    br i1 [[GUARD_UNINITIALIZED1]], label [[INIT_CHECK2:%.*]], label [[INIT_END3:%.*]], !prof [[PROF2]]
2288 // CHECK12:       init.check2:
2289 // CHECK12-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) @_ZZ5tmainIiET_vE3var, i32 3)
2290 // CHECK12-NEXT:    [[TMP3:%.*]] = call i32 @__cxa_thread_atexit(void (i8*)* bitcast (void (%struct.S.0*)* @_ZN1SIiED1Ev to void (i8*)*), i8* bitcast (%struct.S.0* @_ZZ5tmainIiET_vE3var to i8*), i8* @__dso_handle) #[[ATTR4]]
2291 // CHECK12-NEXT:    store i8 1, i8* bitcast (i64* @_ZGVZ5tmainIiET_vE3var to i8*), align 8
2292 // CHECK12-NEXT:    br label [[INIT_END3]]
2293 // CHECK12:       init.end3:
2294 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, [2 x i32]*, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* @_ZZ5tmainIiET_vE5t_var, [2 x i32]* @_ZZ5tmainIiET_vE3vec, [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, %struct.S.0* @_ZZ5tmainIiET_vE3var)
2295 // CHECK12-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* @_ZZ5tmainIiET_vE5t_var)
2296 // CHECK12-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
2297 // CHECK12-NEXT:    ret i32 0
2298 //
2299 //
2300 // CHECK12-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
2301 // CHECK12-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2302 // CHECK12-NEXT:  entry:
2303 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2304 // CHECK12-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2305 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2306 // CHECK12-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
2307 // CHECK12-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 128
2308 // CHECK12-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
2309 // CHECK12-NEXT:    store float [[CONV]], float* [[F]], align 4
2310 // CHECK12-NEXT:    ret void
2311 //
2312 //
2313 // CHECK12-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
2314 // CHECK12-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2315 // CHECK12-NEXT:  entry:
2316 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2317 // CHECK12-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2318 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2319 // CHECK12-NEXT:    ret void
2320 //
2321 //
2322 // CHECK12-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
2323 // CHECK12-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2324 // CHECK12-NEXT:  entry:
2325 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2326 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
2327 // CHECK12-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2328 // CHECK12-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
2329 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2330 // CHECK12-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
2331 // CHECK12-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
2332 // CHECK12-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 128
2333 // CHECK12-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
2334 // CHECK12-NEXT:    [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
2335 // CHECK12-NEXT:    store float [[ADD]], float* [[F]], align 4
2336 // CHECK12-NEXT:    ret void
2337 //
2338 //
2339 // CHECK12-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
2340 // CHECK12-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2341 // CHECK12-NEXT:  entry:
2342 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2343 // CHECK12-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2344 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2345 // CHECK12-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
2346 // CHECK12-NEXT:    ret void
2347 //
2348 //
2349 // CHECK12-LABEL: define {{[^@]+}}@_ZN1SIiEaSERKS0_
2350 // CHECK12-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR2]] comdat align 2 {
2351 // CHECK12-NEXT:  entry:
2352 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2353 // CHECK12-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S.0*, align 8
2354 // CHECK12-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2355 // CHECK12-NEXT:    store %struct.S.0* [[TMP0]], %struct.S.0** [[DOTADDR]], align 8
2356 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2357 // CHECK12-NEXT:    ret %struct.S.0* [[THIS1]]
2358 //
2359 //
2360 // CHECK12-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
2361 // CHECK12-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2362 // CHECK12-NEXT:  entry:
2363 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2364 // CHECK12-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2365 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2366 // CHECK12-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
2367 // CHECK12-NEXT:    ret void
2368 //
2369 //
2370 // CHECK12-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
2371 // CHECK12-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2372 // CHECK12-NEXT:  entry:
2373 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2374 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
2375 // CHECK12-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2376 // CHECK12-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
2377 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2378 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
2379 // CHECK12-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
2380 // CHECK12-NEXT:    ret void
2381 //
2382 //
2383 // CHECK12-LABEL: define {{[^@]+}}@__cxx_global_array_dtor.2
2384 // CHECK12-SAME: (i8* [[TMP0:%.*]]) #[[ATTR3]] section ".text.startup" {
2385 // CHECK12-NEXT:  entry:
2386 // CHECK12-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
2387 // CHECK12-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
2388 // CHECK12-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
2389 // CHECK12:       arraydestroy.body:
2390 // CHECK12-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ getelementptr inbounds ([[STRUCT_S_0:%.*]], %struct.S.0* getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
2391 // CHECK12-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
2392 // CHECK12-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
2393 // CHECK12-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0)
2394 // CHECK12-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
2395 // CHECK12:       arraydestroy.done1:
2396 // CHECK12-NEXT:    ret void
2397 //
2398 //
2399 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..3
2400 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR5]] {
2401 // CHECK12-NEXT:  entry:
2402 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2403 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2404 // CHECK12-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
2405 // CHECK12-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
2406 // CHECK12-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
2407 // CHECK12-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
2408 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2409 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2410 // CHECK12-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
2411 // CHECK12-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
2412 // CHECK12-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
2413 // CHECK12-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
2414 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
2415 // CHECK12-NEXT:    [[TMP1:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
2416 // CHECK12-NEXT:    [[TMP2:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
2417 // CHECK12-NEXT:    [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
2418 // CHECK12-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP0]] to i64
2419 // CHECK12-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], ptrtoint (i32* @_ZZ5tmainIiET_vE5t_var to i64)
2420 // CHECK12-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
2421 // CHECK12:       copyin.not.master:
2422 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP0]], align 128
2423 // CHECK12-NEXT:    store i32 [[TMP6]], i32* @_ZZ5tmainIiET_vE5t_var, align 128
2424 // CHECK12-NEXT:    [[TMP7:%.*]] = bitcast [2 x i32]* [[TMP1]] to i8*
2425 // CHECK12-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 bitcast ([2 x i32]* @_ZZ5tmainIiET_vE3vec to i8*), i8* align 128 [[TMP7]], i64 8, i1 false)
2426 // CHECK12-NEXT:    [[TMP8:%.*]] = bitcast [2 x %struct.S.0]* [[TMP2]] to %struct.S.0*
2427 // CHECK12-NEXT:    br i1 icmp eq (%struct.S.0* getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0), %struct.S.0* getelementptr ([[STRUCT_S_0:%.*]], %struct.S.0* getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0), i64 2)), label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
2428 // CHECK12:       omp.arraycpy.body:
2429 // CHECK12-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP8]], [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2430 // CHECK12-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0), [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2431 // CHECK12-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]])
2432 // CHECK12-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
2433 // CHECK12-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
2434 // CHECK12-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], getelementptr ([[STRUCT_S_0]], %struct.S.0* getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i32 0, i32 0), i64 2)
2435 // CHECK12-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
2436 // CHECK12:       omp.arraycpy.done1:
2437 // CHECK12-NEXT:    [[CALL2:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) @_ZZ5tmainIiET_vE3var, %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP3]])
2438 // CHECK12-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
2439 // CHECK12:       copyin.not.master.end:
2440 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2441 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
2442 // CHECK12-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
2443 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* @_ZZ5tmainIiET_vE5t_var, align 128
2444 // CHECK12-NEXT:    store i32 [[TMP11]], i32* getelementptr inbounds ([2 x i32], [2 x i32]* @_ZZ5tmainIiET_vE3vec, i64 0, i64 0), align 128
2445 // CHECK12-NEXT:    [[CALL3:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEaSERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S.0], [2 x %struct.S.0]* @_ZZ5tmainIiET_vE5s_arr, i64 0, i64 0), %struct.S.0* nonnull align 4 dereferenceable(4) @_ZZ5tmainIiET_vE3var)
2446 // CHECK12-NEXT:    ret void
2447 //
2448 //
2449 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..4
2450 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR5]] {
2451 // CHECK12-NEXT:  entry:
2452 // CHECK12-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2453 // CHECK12-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2454 // CHECK12-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
2455 // CHECK12-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2456 // CHECK12-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2457 // CHECK12-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
2458 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
2459 // CHECK12-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[TMP0]] to i64
2460 // CHECK12-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[TMP1]], ptrtoint (i32* @_ZZ5tmainIiET_vE5t_var to i64)
2461 // CHECK12-NEXT:    br i1 [[TMP2]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
2462 // CHECK12:       copyin.not.master:
2463 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 128
2464 // CHECK12-NEXT:    store i32 [[TMP3]], i32* @_ZZ5tmainIiET_vE5t_var, align 128
2465 // CHECK12-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
2466 // CHECK12:       copyin.not.master.end:
2467 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2468 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
2469 // CHECK12-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
2470 // CHECK12-NEXT:    ret void
2471 //
2472 //
2473 // CHECK12-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
2474 // CHECK12-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2475 // CHECK12-NEXT:  entry:
2476 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2477 // CHECK12-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2478 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2479 // CHECK12-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
2480 // CHECK12-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 128
2481 // CHECK12-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
2482 // CHECK12-NEXT:    ret void
2483 //
2484 //
2485 // CHECK12-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
2486 // CHECK12-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2487 // CHECK12-NEXT:  entry:
2488 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2489 // CHECK12-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2490 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2491 // CHECK12-NEXT:    ret void
2492 //
2493 //
2494 // CHECK12-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
2495 // CHECK12-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2496 // CHECK12-NEXT:  entry:
2497 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2498 // CHECK12-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
2499 // CHECK12-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2500 // CHECK12-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
2501 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2502 // CHECK12-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
2503 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
2504 // CHECK12-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 128
2505 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]]
2506 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[F]], align 4
2507 // CHECK12-NEXT:    ret void
2508 //
2509 //
2510 // CHECK12-LABEL: define {{[^@]+}}@_ZTW1g
2511 // CHECK12-SAME: () #[[ATTR8:[0-9]+]] comdat {
2512 // CHECK12-NEXT:    ret i32* @g
2513 //
2514 //
2515 // CHECK13-LABEL: define {{[^@]+}}@main
2516 // CHECK13-SAME: () #[[ATTR0:[0-9]+]] {
2517 // CHECK13-NEXT:  entry:
2518 // CHECK13-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2519 // CHECK13-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
2520 // CHECK13-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2521 // CHECK13-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 1 dereferenceable(1) [[REF_TMP]])
2522 // CHECK13-NEXT:    ret i32 0
2523 //
2524 //
2525 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined.
2526 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[G:%.*]]) #[[ATTR2:[0-9]+]] {
2527 // CHECK13-NEXT:  entry:
2528 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2529 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2530 // CHECK13-NEXT:    [[G_ADDR:%.*]] = alloca i32*, align 8
2531 // CHECK13-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 1
2532 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2533 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2534 // CHECK13-NEXT:    store i32* [[G]], i32** [[G_ADDR]], align 8
2535 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[G_ADDR]], align 8
2536 // CHECK13-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[TMP0]] to i64
2537 // CHECK13-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[TMP1]], ptrtoint (i32* @g to i64)
2538 // CHECK13-NEXT:    br i1 [[TMP2]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
2539 // CHECK13:       copyin.not.master:
2540 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 128
2541 // CHECK13-NEXT:    store volatile i32 [[TMP3]], i32* @g, align 128
2542 // CHECK13-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
2543 // CHECK13:       copyin.not.master.end:
2544 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2545 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
2546 // CHECK13-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]])
2547 // CHECK13-NEXT:    store volatile i32 1, i32* @g, align 128
2548 // CHECK13-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 1 dereferenceable(1) [[REF_TMP]])
2549 // CHECK13-NEXT:    ret void
2550 //
2551 //
2552 // CHECK13-LABEL: define {{[^@]+}}@_ZTW1g
2553 // CHECK13-SAME: () #[[ATTR5:[0-9]+]] comdat {
2554 // CHECK13-NEXT:    ret i32* @g
2555 //
2556 //
2557 // CHECK14-LABEL: define {{[^@]+}}@main
2558 // CHECK14-SAME: () #[[ATTR1:[0-9]+]] {
2559 // CHECK14-NEXT:  entry:
2560 // CHECK14-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2561 // CHECK14-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2562 // CHECK14-NEXT:    [[TMP0:%.*]] = load i8*, i8** getelementptr inbounds ([[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to %struct.__block_literal_generic*), i32 0, i32 3), align 8
2563 // CHECK14-NEXT:    [[TMP1:%.*]] = bitcast i8* [[TMP0]] to void (i8*)*
2564 // CHECK14-NEXT:    call void [[TMP1]](i8* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to i8*))
2565 // CHECK14-NEXT:    ret i32 0
2566 //
2567 //
2568 // CHECK14-LABEL: define {{[^@]+}}@__main_block_invoke
2569 // CHECK14-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2:[0-9]+]] {
2570 // CHECK14-NEXT:  entry:
2571 // CHECK14-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
2572 // CHECK14-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
2573 // CHECK14-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
2574 // CHECK14-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
2575 // CHECK14-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
2576 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* @g)
2577 // CHECK14-NEXT:    ret void
2578 //
2579 //
2580 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined.
2581 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[G:%.*]]) #[[ATTR3:[0-9]+]] {
2582 // CHECK14-NEXT:  entry:
2583 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2584 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2585 // CHECK14-NEXT:    [[G_ADDR:%.*]] = alloca i32*, align 8
2586 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2587 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2588 // CHECK14-NEXT:    store i32* [[G]], i32** [[G_ADDR]], align 8
2589 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[G_ADDR]], align 8
2590 // CHECK14-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[TMP0]] to i64
2591 // CHECK14-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[TMP1]], ptrtoint (i32* @g to i64)
2592 // CHECK14-NEXT:    br i1 [[TMP2]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
2593 // CHECK14:       copyin.not.master:
2594 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 128
2595 // CHECK14-NEXT:    store volatile i32 [[TMP3]], i32* @g, align 128
2596 // CHECK14-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
2597 // CHECK14:       copyin.not.master.end:
2598 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2599 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
2600 // CHECK14-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]])
2601 // CHECK14-NEXT:    store volatile i32 1, i32* @g, align 128
2602 // CHECK14-NEXT:    [[TMP6:%.*]] = load i8*, i8** getelementptr inbounds ([[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global.2 to %struct.__block_literal_generic*), i32 0, i32 3), align 8
2603 // CHECK14-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to void (i8*)*
2604 // CHECK14-NEXT:    call void [[TMP7]](i8* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global.2 to i8*))
2605 // CHECK14-NEXT:    ret void
2606 //
2607 //
2608 // CHECK14-LABEL: define {{[^@]+}}@g_block_invoke
2609 // CHECK14-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2]] {
2610 // CHECK14-NEXT:  entry:
2611 // CHECK14-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
2612 // CHECK14-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
2613 // CHECK14-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
2614 // CHECK14-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
2615 // CHECK14-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
2616 // CHECK14-NEXT:    store volatile i32 2, i32* @g, align 128
2617 // CHECK14-NEXT:    ret void
2618 //
2619 //
2620 // CHECK14-LABEL: define {{[^@]+}}@_ZTW1g
2621 // CHECK14-SAME: () #[[ATTR6:[0-9]+]] comdat {
2622 // CHECK14-NEXT:    ret i32* @g
2623 //
2624 //
2625 // CHECK15-LABEL: define {{[^@]+}}@_Z10array_funcv
2626 // CHECK15-SAME: () #[[ATTR0:[0-9]+]] {
2627 // CHECK15-NEXT:  entry:
2628 // CHECK15-NEXT:    [[TMP0:%.*]] = load i8, i8* @_ZGVZ10array_funcvE1s, align 1
2629 // CHECK15-NEXT:    [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
2630 // CHECK15-NEXT:    br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !prof [[PROF2:![0-9]+]]
2631 // CHECK15:       init.check:
2632 // CHECK15-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
2633 // CHECK15:       arrayctor.loop:
2634 // CHECK15-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.St* [ getelementptr inbounds ([2 x %struct.St], [2 x %struct.St]* @_ZZ10array_funcvE1s, i32 0, i32 0), [[INIT_CHECK]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
2635 // CHECK15-NEXT:    call void @_ZN2StC1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[ARRAYCTOR_CUR]])
2636 // CHECK15-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[ARRAYCTOR_CUR]], i64 1
2637 // CHECK15-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.St* [[ARRAYCTOR_NEXT]], getelementptr inbounds ([[STRUCT_ST]], %struct.St* getelementptr inbounds ([2 x %struct.St], [2 x %struct.St]* @_ZZ10array_funcvE1s, i32 0, i32 0), i64 2)
2638 // CHECK15-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
2639 // CHECK15:       arrayctor.cont:
2640 // CHECK15-NEXT:    [[TMP1:%.*]] = call i32 @__cxa_thread_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR3:[0-9]+]]
2641 // CHECK15-NEXT:    store i8 1, i8* @_ZGVZ10array_funcvE1s, align 1
2642 // CHECK15-NEXT:    br label [[INIT_END]]
2643 // CHECK15:       init.end:
2644 // CHECK15-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, [2 x %struct.St]*)* @.omp_outlined. to void (i32*, i32*, ...)*), [2 x i32]* @_ZZ10array_funcvE1a, [2 x %struct.St]* @_ZZ10array_funcvE1s)
2645 // CHECK15-NEXT:    ret void
2646 //
2647 //
2648 // CHECK15-LABEL: define {{[^@]+}}@_ZN2StC1Ev
2649 // CHECK15-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
2650 // CHECK15-NEXT:  entry:
2651 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
2652 // CHECK15-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
2653 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
2654 // CHECK15-NEXT:    call void @_ZN2StC2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]])
2655 // CHECK15-NEXT:    ret void
2656 //
2657 //
2658 // CHECK15-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
2659 // CHECK15-SAME: (i8* [[TMP0:%.*]]) #[[ATTR2:[0-9]+]] section ".text.startup" {
2660 // CHECK15-NEXT:  entry:
2661 // CHECK15-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
2662 // CHECK15-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
2663 // CHECK15-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
2664 // CHECK15:       arraydestroy.body:
2665 // CHECK15-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.St* [ getelementptr inbounds ([[STRUCT_ST:%.*]], %struct.St* getelementptr inbounds ([2 x %struct.St], [2 x %struct.St]* @_ZZ10array_funcvE1s, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
2666 // CHECK15-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
2667 // CHECK15-NEXT:    call void @_ZN2StD1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]]
2668 // CHECK15-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.St* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.St], [2 x %struct.St]* @_ZZ10array_funcvE1s, i32 0, i32 0)
2669 // CHECK15-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
2670 // CHECK15:       arraydestroy.done1:
2671 // CHECK15-NEXT:    ret void
2672 //
2673 //
2674 // CHECK15-LABEL: define {{[^@]+}}@_ZN2StD1Ev
2675 // CHECK15-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2676 // CHECK15-NEXT:  entry:
2677 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
2678 // CHECK15-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
2679 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
2680 // CHECK15-NEXT:    call void @_ZN2StD2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]]
2681 // CHECK15-NEXT:    ret void
2682 //
2683 //
2684 // CHECK15-LABEL: define {{[^@]+}}@.omp_outlined.
2685 // CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[A:%.*]], [2 x %struct.St]* nonnull align 4 dereferenceable(16) [[S:%.*]]) #[[ATTR4:[0-9]+]] {
2686 // CHECK15-NEXT:  entry:
2687 // CHECK15-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2688 // CHECK15-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2689 // CHECK15-NEXT:    [[A_ADDR:%.*]] = alloca [2 x i32]*, align 8
2690 // CHECK15-NEXT:    [[S_ADDR:%.*]] = alloca [2 x %struct.St]*, align 8
2691 // CHECK15-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2692 // CHECK15-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2693 // CHECK15-NEXT:    store [2 x i32]* [[A]], [2 x i32]** [[A_ADDR]], align 8
2694 // CHECK15-NEXT:    store [2 x %struct.St]* [[S]], [2 x %struct.St]** [[S_ADDR]], align 8
2695 // CHECK15-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[A_ADDR]], align 8
2696 // CHECK15-NEXT:    [[TMP1:%.*]] = load [2 x %struct.St]*, [2 x %struct.St]** [[S_ADDR]], align 8
2697 // CHECK15-NEXT:    [[TMP2:%.*]] = ptrtoint [2 x i32]* [[TMP0]] to i64
2698 // CHECK15-NEXT:    [[TMP3:%.*]] = icmp ne i64 [[TMP2]], ptrtoint ([2 x i32]* @_ZZ10array_funcvE1a to i64)
2699 // CHECK15-NEXT:    br i1 [[TMP3]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
2700 // CHECK15:       copyin.not.master:
2701 // CHECK15-NEXT:    [[TMP4:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
2702 // CHECK15-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 bitcast ([2 x i32]* @_ZZ10array_funcvE1a to i8*), i8* align 4 [[TMP4]], i64 8, i1 false)
2703 // CHECK15-NEXT:    [[TMP5:%.*]] = bitcast [2 x %struct.St]* [[TMP1]] to %struct.St*
2704 // CHECK15-NEXT:    br i1 icmp eq (%struct.St* getelementptr inbounds ([2 x %struct.St], [2 x %struct.St]* @_ZZ10array_funcvE1s, i32 0, i32 0), %struct.St* getelementptr ([[STRUCT_ST:%.*]], %struct.St* getelementptr inbounds ([2 x %struct.St], [2 x %struct.St]* @_ZZ10array_funcvE1s, i32 0, i32 0), i64 2)), label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
2705 // CHECK15:       omp.arraycpy.body:
2706 // CHECK15-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.St* [ [[TMP5]], [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2707 // CHECK15-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.St* [ getelementptr inbounds ([2 x %struct.St], [2 x %struct.St]* @_ZZ10array_funcvE1s, i32 0, i32 0), [[COPYIN_NOT_MASTER]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
2708 // CHECK15-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.St* @_ZN2StaSERKS_(%struct.St* nonnull align 4 dereferenceable(8) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.St* nonnull align 4 dereferenceable(8) [[OMP_ARRAYCPY_SRCELEMENTPAST]])
2709 // CHECK15-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_ST]], %struct.St* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
2710 // CHECK15-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_ST]], %struct.St* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
2711 // CHECK15-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.St* [[OMP_ARRAYCPY_DEST_ELEMENT]], getelementptr ([[STRUCT_ST]], %struct.St* getelementptr inbounds ([2 x %struct.St], [2 x %struct.St]* @_ZZ10array_funcvE1s, i32 0, i32 0), i64 2)
2712 // CHECK15-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
2713 // CHECK15:       omp.arraycpy.done1:
2714 // CHECK15-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
2715 // CHECK15:       copyin.not.master.end:
2716 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2717 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
2718 // CHECK15-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]])
2719 // CHECK15-NEXT:    ret void
2720 //
2721 //
2722 // CHECK15-LABEL: define {{[^@]+}}@_ZN2StaSERKS_
2723 // CHECK15-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]], %struct.St* nonnull align 4 dereferenceable(8) [[TMP0:%.*]]) #[[ATTR0]] comdat align 2 {
2724 // CHECK15-NEXT:  entry:
2725 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
2726 // CHECK15-NEXT:    [[DOTADDR:%.*]] = alloca %struct.St*, align 8
2727 // CHECK15-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
2728 // CHECK15-NEXT:    store %struct.St* [[TMP0]], %struct.St** [[DOTADDR]], align 8
2729 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
2730 // CHECK15-NEXT:    ret %struct.St* [[THIS1]]
2731 //
2732 //
2733 // CHECK15-LABEL: define {{[^@]+}}@_ZN2StC2Ev
2734 // CHECK15-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2735 // CHECK15-NEXT:  entry:
2736 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
2737 // CHECK15-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
2738 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
2739 // CHECK15-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[THIS1]], i32 0, i32 0
2740 // CHECK15-NEXT:    store i32 0, i32* [[A]], align 4
2741 // CHECK15-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[THIS1]], i32 0, i32 1
2742 // CHECK15-NEXT:    store i32 0, i32* [[B]], align 4
2743 // CHECK15-NEXT:    ret void
2744 //
2745 //
2746 // CHECK15-LABEL: define {{[^@]+}}@_ZN2StD2Ev
2747 // CHECK15-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
2748 // CHECK15-NEXT:  entry:
2749 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
2750 // CHECK15-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
2751 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
2752 // CHECK15-NEXT:    ret void
2753 //
2754 //
2755 // CHECK16-LABEL: define {{[^@]+}}@__cxx_global_var_init
2756 // CHECK16-SAME: () #[[ATTR0:[0-9]+]] section ".text.startup" {
2757 // CHECK16-NEXT:  entry:
2758 // CHECK16-NEXT:    [[CALL:%.*]] = call i32 @_Z6t_initv()
2759 // CHECK16-NEXT:    store i32 [[CALL]], i32* @t, align 4
2760 // CHECK16-NEXT:    ret void
2761 //
2762 //
2763 // CHECK16-LABEL: define {{[^@]+}}@_Z3foov
2764 // CHECK16-SAME: () #[[ATTR2:[0-9]+]] {
2765 // CHECK16-NEXT:  entry:
2766 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
2767 // CHECK16-NEXT:    ret void
2768 //
2769 //
2770 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined.
2771 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
2772 // CHECK16-NEXT:  entry:
2773 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2774 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2775 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2776 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2777 // CHECK16-NEXT:    [[TMP0:%.*]] = call i32* @_ZTW1t()
2778 // CHECK16-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[TMP0]])
2779 // CHECK16-NEXT:    ret void
2780 //
2781 //
2782 // CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..1
2783 // CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T:%.*]]) #[[ATTR3]] {
2784 // CHECK16-NEXT:  entry:
2785 // CHECK16-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2786 // CHECK16-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2787 // CHECK16-NEXT:    [[T_ADDR:%.*]] = alloca i32*, align 8
2788 // CHECK16-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2789 // CHECK16-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2790 // CHECK16-NEXT:    store i32* [[T]], i32** [[T_ADDR]], align 8
2791 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_ADDR]], align 8
2792 // CHECK16-NEXT:    [[TMP1:%.*]] = call i32* @_ZTW1t()
2793 // CHECK16-NEXT:    [[TMP2:%.*]] = ptrtoint i32* [[TMP0]] to i64
2794 // CHECK16-NEXT:    [[TMP3:%.*]] = ptrtoint i32* [[TMP1]] to i64
2795 // CHECK16-NEXT:    [[TMP4:%.*]] = icmp ne i64 [[TMP2]], [[TMP3]]
2796 // CHECK16-NEXT:    br i1 [[TMP4]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
2797 // CHECK16:       copyin.not.master:
2798 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
2799 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[TMP1]], align 4
2800 // CHECK16-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
2801 // CHECK16:       copyin.not.master.end:
2802 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2803 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
2804 // CHECK16-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]])
2805 // CHECK16-NEXT:    [[TMP8:%.*]] = call i32* @_ZTW1t()
2806 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
2807 // CHECK16-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP9]], 1
2808 // CHECK16-NEXT:    store i32 [[INC]], i32* [[TMP8]], align 4
2809 // CHECK16-NEXT:    ret void
2810 //
2811 //
2812 // CHECK16-LABEL: define {{[^@]+}}@_ZTW1t
2813 // CHECK16-SAME: () #[[ATTR4:[0-9]+]] comdat {
2814 // CHECK16-NEXT:    call void @_ZTH1t()
2815 // CHECK16-NEXT:    ret i32* @t
2816 //
2817 //
2818 // CHECK16-LABEL: define {{[^@]+}}@__tls_init
2819 // CHECK16-SAME: () #[[ATTR0]] {
2820 // CHECK16-NEXT:  entry:
2821 // CHECK16-NEXT:    [[TMP0:%.*]] = load i8, i8* @__tls_guard, align 1
2822 // CHECK16-NEXT:    [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
2823 // CHECK16-NEXT:    br i1 [[GUARD_UNINITIALIZED]], label [[INIT:%.*]], label [[EXIT:%.*]], !prof [[PROF4:![0-9]+]]
2824 // CHECK16:       init:
2825 // CHECK16-NEXT:    store i8 1, i8* @__tls_guard, align 1
2826 // CHECK16-NEXT:    call void @__cxx_global_var_init()
2827 // CHECK16-NEXT:    br label [[EXIT]]
2828 // CHECK16:       exit:
2829 // CHECK16-NEXT:    ret void
2830 //
2831 //