// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// RUN: %clang_cc1 -verify -fopenmp -DOMP5 -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=OMP50
// RUN: %clang_cc1 -fopenmp -DOMP5 -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -DOMP5 -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s -check-prefix=CHECK -check-prefix=OMP50
// RUN: %clang_cc1 -verify -fopenmp -DOMP5 -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -DOMP5 -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s

// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -DOMP5 -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -DOMP5 -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -DOMP5 -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -DOMP5 -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -DOMP5 -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
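// This test checks code generation for the OpenMP 'lastprivate' clause on
// '#pragma omp for': copy-back of scalars, arrays, class objects, and class
// members (including a bit-field and a reference member), its combination
// with 'firstprivate' and 'allocate', and the OpenMP 5.0 'conditional'
// modifier, in plain C++, lambda, and blocks variants.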
#ifndef HEADER
#define HEADER

#ifdef OMP5
#define CONDITIONAL conditional :
#else
#define CONDITIONAL
#endif //OMP5

enum omp_allocator_handle_t {
  omp_null_allocator = 0,
  omp_default_mem_alloc = 1,
  omp_large_cap_mem_alloc = 2,
  omp_const_mem_alloc = 3,
  omp_high_bw_mem_alloc = 4,
  omp_low_lat_mem_alloc = 5,
  omp_cgroup_mem_alloc = 6,
  omp_pteam_mem_alloc = 7,
  omp_thread_mem_alloc = 8,
  KMP_ALLOCATOR_MAX_HANDLE = __UINTPTR_MAX__
};

struct SS {
  int a;
  char e[4];
  int b : 4;
  int &c;
  SS(int &d) : a(0), b(0), c(d) {
#pragma omp parallel
#pragma omp for firstprivate(e) lastprivate(a, b, c, e)
    for (int i = 0; i < 2; ++i)
#ifdef LAMBDA
      [&]() {
        ++this->a, --b, (this)->c /= 1;
#pragma omp parallel
#pragma omp for lastprivate(a, b, c)
        for (int i = 0; i < 2; ++i)
          ++(this)->a, --b, this->c /= 1;
      }();
#elif defined(BLOCKS)
      ^{
        ++a;
        --this->b;
        (this)->c /= 1;
#pragma omp parallel
#pragma omp for lastprivate(a, b, c)
        for (int i = 0; i < 2; ++i)
          ++(this)->a, --b, this->c /= 1;
      }();
#else
      ++this->a, --b, c /= 1;
#endif
#pragma omp for
    for (a = 0; a < 2; ++a)
#ifdef LAMBDA
      [&]() {
        --this->a, ++b, (this)->c *= 2;
#pragma omp parallel
#pragma omp for lastprivate(b)
        for (b = 0; b < 2; ++b)
          ++(this)->a, --b, this->c /= 1;
      }();
#elif defined(BLOCKS)
      ^{
        ++a;
        --this->b;
        (this)->c /= 1;
#pragma omp parallel
#pragma omp for
        for (c = 0; c < 2; ++c)
          ++(this)->a, --b, this->c /= 1;
      }();
#else
      ++this->a, --b, c /= 1;
#endif
  }
};

template <typename T>
struct SST {
  T a;
  SST() : a(T()) {
#pragma omp parallel
#pragma omp for lastprivate(a)
    for (int i = 0; i < 2; ++i)
#ifdef LAMBDA
      [&]() {
        [&]() {
          ++this->a;
#pragma omp parallel
#pragma omp for lastprivate(a)
          for (int i = 0; i < 2; ++i)
            ++(this)->a;
        }();
      }();
#elif defined(BLOCKS)
      ^{
        ^{
          ++a;
#pragma omp parallel
#pragma omp for lastprivate(a)
          for (int i = 0; i < 2; ++i)
            ++(this)->a;
        }();
      }();
#else
      ++(this)->a;
#endif
#pragma omp for
    for (a = 0; a < 2; ++a)
#ifdef LAMBDA
      [&]() {
        ++this->a;
#pragma omp parallel
#pragma omp for
        for (a = 0; a < 2; ++(this)->a)
          ++(this)->a;
      }();
#elif defined(BLOCKS)
      ^{
        ++a;
#pragma omp parallel
#pragma omp for
        for (this->a = 0; a < 2; ++a)
          ++(this)->a;
      }();
#else
      ++(this)->a;
#endif
  }
};

template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  S<T> &operator=(const S<T> &);
  operator T() { return T(); }
  ~S() {}
};

volatile int g __attribute__((aligned(128)))= 1212;
volatile int &g1 = g;
float f;
char cnt;

// CHECK: [[SS_TY:%.+]] = type { i{{[0-9]+}}, [4 x i8], i8
// LAMBDA: [[SS_TY:%.+]] = type { i{{[0-9]+}}, [4 x i8], i8
// BLOCKS: [[SS_TY:%.+]] = type { i{{[0-9]+}}, [4 x i8], i8
// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[S_INT_TY:%.+]] = type { i32 }
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[X:@.+]] ={{.*}} global double 0.0
// CHECK-DAG: [[F:@.+]] ={{.*}} global float 0.0
// CHECK-DAG: [[CNT:@.+]] ={{.*}} global i8 0
// OMP50-DAG: [[LAST_IV_F:@.+]] = {{.*}}common global i32 0
// OMP50-DAG: [[LAST_F:@.+]] = {{.*}}common global float 0.000000e+00,

template <typename T>
T tmain() {
  S<T> test;
  SST<T> sst;
  T t_var __attribute__((aligned(128))) = T();
  T vec[] __attribute__((aligned(128))) = {1, 2};
  S<T> s_arr[] __attribute__((aligned(128))) = {1, 2};
  S<T> &var __attribute__((aligned(128))) = test;
#pragma omp parallel
#pragma omp for lastprivate(t_var, vec, s_arr, var)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  return T();
}

namespace A {
double x;
}
namespace B {
using A::x;
}

int main() {
  static int sivar;
  SS ss(sivar);
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] ={{.*}} global i{{[0-9]+}} 1212,
  // LAMBDA: [[SIVAR:@.+]] = internal global i{{[0-9]+}} 0,
  // LAMBDA-LABEL: @main
  // LAMBDA: alloca [[SS_TY]],
  // LAMBDA: alloca [[CAP_TY:%.+]],
  // FIXME: The outer lambda should not capture 'sivar'; that capture is not
  // used for anything.
  // LAMBDA: store {{.*}}@_ZZ4mainE5sivar,
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]]([[CAP_TY]]*
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i32* @_ZZ4mainE5sivar)
#pragma omp parallel
#pragma omp for lastprivate(g, g1, sivar)
  for (int i = 0; i < 2; ++i) {
    // LAMBDA: define {{.+}} @{{.+}}([[SS_TY]]*
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
    // LAMBDA: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
    // LAMBDA: store i8
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 3
    // LAMBDA: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA-NOT: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
    // LAMBDA: call{{.*}} void [[SS_LAMBDA1:@[^ ]+]]
    // LAMBDA: call void @__kmpc_for_static_fini(%
    // LAMBDA: ret

    // LAMBDA: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
    // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 0
    // LAMBDA-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
    // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 3
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]*
    // LAMBDA: call{{.*}} void [[SS_LAMBDA:@[^ ]+]]
    // LAMBDA: call void @__kmpc_for_static_fini(
    // LAMBDA: br i1
    // LAMBDA: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
    // LAMBDA: store i8 %{{.+}}, i8* [[B_REF]],
    // LAMBDA: br label
    // LAMBDA: ret void

    // LAMBDA: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, i32* {{.+}}, i32* {{.+}}, i32* {{.+}})
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
    // LAMBDA: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
    // LAMBDA-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
    // LAMBDA-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
    // LAMBDA-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
    // LAMBDA-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
    // LAMBDA-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
    // LAMBDA-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
    // LAMBDA: call void @__kmpc_for_static_fini(
    // LAMBDA: br i1
    // LAMBDA: br label
    // LAMBDA: ret void

    // LAMBDA: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
    // LAMBDA: ret void

    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.+]])
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 128
    // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[G1_PRIVATE_REF:%.+]] = alloca i{{[0-9]+}}*,
    // LAMBDA: [[SIVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[SIVAR_PRIVATE_ADDR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}},

    // LAMBDA: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // LAMBDA: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]

    // LAMBDA: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // LAMBDA: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G1_PRIVATE_REF]],
    // LAMBDA: store volatile i{{[0-9]+}} 1, i{{[0-9]+}}* [[G1_PRIVATE_ADDR]],
    // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: [[G1_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
    // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G1_PRIVATE_REF]],
    // LAMBDA: store i{{[0-9]+}}* [[G1_PRIVATE_ADDR]], i{{[0-9]+}}** [[G1_PRIVATE_ADDR_REF]]
    // LAMBDA: [[SIVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
    // LAMBDA: store i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]], i{{[0-9]+}}** [[SIVAR_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* {{[^,]*}} [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g = 1;
    g1 = 1;
    sivar = 2;
    // Check for final copying of private values back to original vars.
    // LAMBDA: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // LAMBDA: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // LAMBDA: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // LAMBDA: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // LAMBDA: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],

    // original sivar=private_sivar;
    // LAMBDA: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // LAMBDA: store i{{[0-9]+}} [[SIVAR_VAL]], i{{[0-9]+}}* %{{.+}},
    // LAMBDA: br label %[[LAST_DONE]]
    // LAMBDA: [[LAST_DONE]]
    // LAMBDA: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* {{[^,]*}} [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      g1 = 2;
      sivar = 4;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]]
      // LAMBDA: [[G1_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
      // LAMBDA: [[G1_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G1_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G1_REF]]
      // LAMBDA: [[SIVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
      // LAMBDA: [[SIVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SIVAR_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] ={{.*}} global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp for lastprivate(g, g1, sivar)
  for (int i = 0; i < 2; ++i) {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.+]])
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 128
    // BLOCKS: [[G1_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 4
    // BLOCKS: [[SIVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: store i{{[0-9]+}}* [[SIVAR]], i{{[0-9]+}}** [[SIVAR_ADDR:%.+]],
    // BLOCKS: {{.+}} = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_ADDR]]
    // BLOCKS: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // BLOCKS: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // BLOCKS: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // BLOCKS: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g = 1;
    g1 = 1;
    sivar = 2;
    // Check for final copying of private values back to original vars.
    // BLOCKS: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // BLOCKS: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // BLOCKS: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // BLOCKS: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // BLOCKS: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],
    // BLOCKS: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // BLOCKS: store i{{[0-9]+}} [[SIVAR_VAL]], i{{[0-9]+}}* %{{.+}},
    // BLOCKS: br label %[[LAST_DONE]]
    // BLOCKS: [[LAST_DONE]]
    // BLOCKS: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    g = 1;
    g1 = 1;
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      g1 = 1;
      sivar = 4;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 4, i{{[0-9]+}}*
      // BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
// BLOCKS: define {{.+}} @{{.+}}([[SS_TY]]*
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// BLOCKS: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// BLOCKS: store i8
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 3
// BLOCKS: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
// BLOCKS: call void @__kmpc_for_static_init_4(
// BLOCKS-NOT: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// BLOCKS: call void
// BLOCKS: call void @__kmpc_for_static_fini(%
// BLOCKS: ret

// BLOCKS: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
// BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 0
// BLOCKS-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
// BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 3
// BLOCKS: call void @__kmpc_for_static_init_4(
// BLOCKS-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]*
// BLOCKS: call{{.*}} void
// BLOCKS: call void @__kmpc_for_static_fini(
// BLOCKS: br i1
// BLOCKS: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
// BLOCKS: store i8 %{{.+}}, i8* [[B_REF]],
// BLOCKS: br label
// BLOCKS: ret void

// BLOCKS: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, i32* {{.+}}, i32* {{.+}}, i32* {{.+}})
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
// BLOCKS: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
// BLOCKS: call void @__kmpc_for_static_init_4(
// BLOCKS: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
// BLOCKS-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
// BLOCKS-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
// BLOCKS-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
// BLOCKS-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
// BLOCKS-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
// BLOCKS-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
// BLOCKS-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
// BLOCKS-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
// BLOCKS-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
// BLOCKS-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
// BLOCKS: call void @__kmpc_for_static_fini(
// BLOCKS: br i1
// BLOCKS: br label
// BLOCKS: ret void
#else
  S<float> test;
  int t_var = 0;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3);
#pragma omp parallel
#pragma omp for lastprivate(t_var, vec, s_arr, var, sivar)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
    sivar += i;
  }
#pragma omp parallel
#pragma omp for lastprivate(A::x, B::x) firstprivate(f) lastprivate(f)
  for (int i = 0; i < 2; ++i) {
    A::x++;
  }
#pragma omp parallel
#pragma omp for allocate(omp_const_mem_alloc: f) firstprivate(f) lastprivate(f)
  for (int i = 0; i < 2; ++i) {
    A::x++;
  }
#pragma omp parallel
#pragma omp for allocate(omp_const_mem_alloc :cnt) lastprivate(cnt) lastprivate(CONDITIONAL f)
  for (cnt = 0; cnt < 2; ++cnt) {
    A::x++;
    f = 0;
  }
  return tmain<int>();
#endif
}

// CHECK: define{{.*}} i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* {{[^,]*}} [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]*, i32*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK1:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK2:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK3:@.+]] to void
// CHECK: = call {{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call void [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret

// CHECK: define internal void [[MAIN_MICROTASK]](i32* noalias [[GTID_ADDR:%.+]], i32* noalias %{{.+}}, i32* nonnull align 4 dereferenceable(4) %{{.+}}, [2 x i32]* nonnull align 4 dereferenceable(8) %{{.+}}, [2 x [[S_FLOAT_TY]]]* nonnull align 4 dereferenceable(8) %{{.+}}, [[S_FLOAT_TY]]* nonnull align 4 dereferenceable(4) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]],
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[SIVAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VEC_REF:%.+]] = load [2 x i32]*, [2 x i32]** %
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** %
// CHECK: [[VAR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %

// Check for default initialization.
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_FLOAT_TY]]*
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* {{[^,]*}} [[S_ARR_PRIV_ITEM]])
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* {{[^,]*}} [[VAR_PRIV]])
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original t_var=private_t_var;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_REF]],

// original vec[]=private_vec[];
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_DEST]], i8* align {{[0-9]+}} [[VEC_SRC]],

// original s_arr[]=private_s_arr[];
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[S_ARR_PRIV]] to [[S_FLOAT_TY]]*
// CHECK: [[S_ARR_END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN:@.+]]([[S_FLOAT_TY]]* {{.+}}, [[S_FLOAT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %[[S_ARR_BODY_DONE]], label %[[S_ARR_BODY]]
// CHECK: [[S_ARR_BODY_DONE]]

// original var=private_var;
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN:@.+]]([[S_FLOAT_TY]]* {{[^,]*}} [[VAR_REF]], [[S_FLOAT_TY]]* {{.*}} [[VAR_PRIV]])
// CHECK: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIV]],
// CHECK: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]
// CHECK-DAG: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* {{[^,]*}} [[VAR_PRIV]])
// CHECK-DAG: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

//
// CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK: [[F_PRIV:%.+]] = alloca float,
// CHECK-NOT: alloca float
// CHECK: [[X_PRIV:%.+]] = alloca double,
// CHECK-NOT: alloca float
// CHECK-NOT: alloca double

// Check for default initialization.
// CHECK-NOT: [[X_PRIV]]
// CHECK: [[F_VAL:%.+]] = load float, float* [[F]],
// CHECK: store float [[F_VAL]], float* [[F_PRIV]],
// CHECK-NOT: [[X_PRIV]]

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original x=private_x;
// CHECK: [[X_VAL:%.+]] = load double, double* [[X_PRIV]],
// CHECK: store double [[X_VAL]], double* [[X]],

// original f=private_f;
// CHECK: [[F_VAL:%.+]] = load float, float* [[F_PRIV]],
// CHECK: store float [[F_VAL]], float* [[F]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

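// With 'allocate(omp_const_mem_alloc: f)', the private copy of 'f' is
// obtained through __kmpc_alloc/__kmpc_free instead of a local alloca.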
// CHECK: define internal void [[MAIN_MICROTASK2]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK-NOT: alloca float

// Check for default initialization.
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: [[F_VOID_PTR:%.+]] = call i8* @__kmpc_alloc(i32 [[GTID]], i64 4, i8* inttoptr (i64 3 to i8*))
// CHECK: [[F_PRIV:%.+]] = bitcast i8* [[F_VOID_PTR]] to float*
// CHECK: [[F_VAL:%.+]] = load float, float* [[F]],
// CHECK: store float [[F_VAL]], float* [[F_PRIV]],

// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original f=private_f;
// CHECK: [[F_VAL:%.+]] = load float, float* [[F_PRIV]],
// CHECK: store float [[F_VAL]], float* [[F]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: [[F_VOID_PTR:%.+]] = bitcast float* [[F_PRIV]] to i8*
// CHECK-NEXT:      call void @__kmpc_free(i32 [[GTID]], i8* [[F_VOID_PTR]], i8* inttoptr (i64 3 to i8*))
// CHECK-NEXT: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK-NEXT: ret void

// CHECK: define internal void [[MAIN_MICROTASK3]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: [[CNT_PRIV:%.+]] = call i8* @__kmpc_alloc(i32 [[GTID]], i64 1, i8* inttoptr (i64 3 to i8*))
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// UB = min(UB, GlobalUB)
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 1
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 1, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// <Skip loop body>
// CHECK: store float 0.000000e+00, float* [[F_PRIV:%.+]],
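// For the OpenMP 5.0 'conditional:' modifier, the candidate last value of 'f'
// is tracked in the [[LAST_IV_F]]/[[LAST_F]] globals and updated under a
// critical section, so the value written in the last iteration that actually
// assigned 'f' is the one copied back.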
// OMP50: call void @__kmpc_critical(%struct.ident_t* @{{.+}}, i32 [[GTID]], [8 x i32]* [[F_REGION:@.+]])
// OMP50: [[LAST_IV:%.+]] = load i32, i32* [[LAST_IV_F]],
// OMP50: [[CMP:%.+]] = icmp sle i32 [[LAST_IV]], [[IV:%.+]]
// OMP50: br i1 [[CMP]], label %[[LP_THEN:.+]], label %[[LP_DONE:[^,]+]]

// OMP50: [[LP_THEN]]:
// OMP50: store i32 [[IV]], i32* [[LAST_IV_F]],
// OMP50: [[F_VAL:%.+]] = load float, float* [[F_PRIV]],
// OMP50: store float [[F_VAL]], float* [[LAST_F]],
// OMP50: br label %[[LP_DONE]]

// OMP50: [[LP_DONE]]:
// OMP50: call void @__kmpc_end_critical(%struct.ident_t* @{{.+}}, i32 [[GTID]], [8 x i32]* [[F_REGION]])
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// OMP50-NEXT: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]

// Calculate private cnt value.
// CHECK: store i8 2, i8* [[CNT_PRIV]]
// original cnt=private_cnt;
// CHECK: [[CNT_VAL:%.+]] = load i8, i8* [[CNT_PRIV]],
// CHECK: store i8 [[CNT_VAL]], i8* [[CNT]],
// OMP50: [[F_VAL:%.+]] = load float, float* [[LAST_F]],
// OMP50: store float [[F_VAL]], float* [[F_PRIV]],
// CHECK: [[F_VAL:%.+]] = load float, float* [[F_PRIV]],
// CHECK: store float [[F_VAL]], float* [[F]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK:      call void @__kmpc_free(i32 [[GTID]], i8* [[CNT_PRIV]], i8* inttoptr (i64 3 to i8*))
// CHECK-NEXT: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK-NEXT: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* {{[^,]*}} [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret

// CHECK: define {{.+}} @{{.+}}([[SS_TY]]*
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// CHECK: store i8
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 3
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK-NOT: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 3
// CHECK: call void @__kmpc_for_static_fini(%
// CHECK: ret

// CHECK: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
// CHECK: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
// CHECK-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
// CHECK-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
// CHECK-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
// CHECK-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
// CHECK-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
// CHECK-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
// CHECK-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
// CHECK-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
// CHECK-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
// CHECK-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
// CHECK: call void @__kmpc_for_static_fini(
// CHECK: br i1
// CHECK: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
// CHECK: store i8 %{{.+}}, i8* [[B_REF]],
// CHECK: br label
// CHECK: ret void

// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i32* nonnull align 4 dereferenceable(4) %{{.+}}, [2 x i32]* nonnull align 4 dereferenceable(8) %{{.+}}, [2 x [[S_INT_TY]]]* nonnull align 4 dereferenceable(8) %{{.+}}, [[S_INT_TY]]* nonnull align 4 dereferenceable(4) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, align 128
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], align 128
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]], align 128
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]], align 128
// CHECK: [[VAR_PRIV_REF:%.+]] = alloca [[S_INT_TY]]*,
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** %
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** %

// Check for default initialization.
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_INT_TY]]*
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* {{[^,]*}} [[S_ARR_PRIV_ITEM]])
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* {{[^,]*}} [[VAR_PRIV]])
// CHECK: store [[S_INT_TY]]* [[VAR_PRIV]], [[S_INT_TY]]** [[VAR_PRIV_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original t_var=private_t_var;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_REF]],

// original vec[]=private_vec[];
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_DEST]], i8* align {{[0-9]+}} [[VEC_SRC]],

// original s_arr[]=private_s_arr[];
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]] to [[S_INT_TY]]*
// CHECK: [[S_ARR_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %[[S_ARR_BODY_DONE]], label %[[S_ARR_BODY]]
// CHECK: [[S_ARR_BODY_DONE]]

// original var=private_var;
// CHECK: [[VAR_PRIV1:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[VAR_PRIV_REF]],
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* {{[^,]*}} [[VAR_REF]], [[S_INT_TY]]* {{.*}} [[VAR_PRIV1]])
// CHECK: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* {{[^,]*}} [[VAR_PRIV]])
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void
#endif
