// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=OMP50
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s -check-prefix=CHECK -check-prefix=OMP50
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
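// Test host code generation for the OpenMP 'for' directive with the
// 'lastprivate' clause: scalars, arrays, class types, bit-field and
// reference members, conditional lastprivate (OpenMP 5.0), and
// lastprivate combined with 'allocate'.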
#ifndef HEADER
#define HEADER

#ifdef OMP5
#define CONDITIONAL conditional :
#else
#define CONDITIONAL
#endif //OMP5

enum omp_allocator_handle_t {
  omp_null_allocator = 0,
  omp_default_mem_alloc = 1,
  omp_large_cap_mem_alloc = 2,
  omp_const_mem_alloc = 3,
  omp_high_bw_mem_alloc = 4,
  omp_low_lat_mem_alloc = 5,
  omp_cgroup_mem_alloc = 6,
  omp_pteam_mem_alloc = 7,
  omp_thread_mem_alloc = 8,
  KMP_ALLOCATOR_MAX_HANDLE = __UINTPTR_MAX__
};

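// SS exercises lastprivate on a plain data member, a bit-field, and a
// reference member from within a constructor, in plain, lambda, and
// blocks variants.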
struct SS {
  int a;
  int b : 4;
  int &c;
  SS(int &d) : a(0), b(0), c(d) {
#pragma omp parallel
#pragma omp for lastprivate(a, b, c)
    for (int i = 0; i < 2; ++i)
#ifdef LAMBDA
      [&]() {
        ++this->a, --b, (this)->c /= 1;
#pragma omp parallel
#pragma omp for lastprivate(a, b, c)
        for (int i = 0; i < 2; ++i)
          ++(this)->a, --b, this->c /= 1;
      }();
#elif defined(BLOCKS)
      ^{
        ++a;
        --this->b;
        (this)->c /= 1;
#pragma omp parallel
#pragma omp for lastprivate(a, b, c)
        for (int i = 0; i < 2; ++i)
          ++(this)->a, --b, this->c /= 1;
      }();
#else
      ++this->a, --b, c /= 1;
#endif
#pragma omp for
    for (a = 0; a < 2; ++a)
#ifdef LAMBDA
      [&]() {
        --this->a, ++b, (this)->c *= 2;
#pragma omp parallel
#pragma omp for lastprivate(b)
        for (b = 0; b < 2; ++b)
          ++(this)->a, --b, this->c /= 1;
      }();
#elif defined(BLOCKS)
      ^{
        ++a;
        --this->b;
        (this)->c /= 1;
#pragma omp parallel
#pragma omp for
        for (c = 0; c < 2; ++c)
          ++(this)->a, --b, this->c /= 1;
      }();
#else
      ++this->a, --b, c /= 1;
#endif
  }
};

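// SST repeats the member pattern with a dependent member of template type T.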
template <typename T>
struct SST {
  T a;
  SST() : a(T()) {
#pragma omp parallel
#pragma omp for lastprivate(a)
    for (int i = 0; i < 2; ++i)
#ifdef LAMBDA
      [&]() {
        [&]() {
          ++this->a;
#pragma omp parallel
#pragma omp for lastprivate(a)
          for (int i = 0; i < 2; ++i)
            ++(this)->a;
        }();
      }();
#elif defined(BLOCKS)
      ^{
        ^{
          ++a;
#pragma omp parallel
#pragma omp for lastprivate(a)
          for (int i = 0; i < 2; ++i)
            ++(this)->a;
        }();
      }();
#else
      ++(this)->a;
#endif
#pragma omp for
    for (a = 0; a < 2; ++a)
#ifdef LAMBDA
      [&]() {
        ++this->a;
#pragma omp parallel
#pragma omp for
        for (a = 0; a < 2; ++(this)->a)
          ++(this)->a;
      }();
#elif defined(BLOCKS)
      ^{
        ++a;
#pragma omp parallel
#pragma omp for
        for (this->a = 0; a < 2; ++a)
          ++(this)->a;
      }();
#else
      ++(this)->a;
#endif
  }
};

template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  S<T> &operator=(const S<T> &);
  operator T() { return T(); }
  ~S() {}
};

volatile int g __attribute__((aligned(128))) = 1212;
volatile int &g1 = g;
float f;
char cnt;

// CHECK: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// LAMBDA: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// BLOCKS: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[S_INT_TY:%.+]] = type { i32 }
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr global %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[X:@.+]] = global double 0.0
// CHECK-DAG: [[F:@.+]] = global float 0.0
// CHECK-DAG: [[CNT:@.+]] = global i8 0
// OMP50-DAG: [[LAST_IV_F:@.+]] = {{.*}}common global i32 0
// OMP50-DAG: [[LAST_F:@.+]] = {{.*}}common global float 0.000000e+00,

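// tmain applies lastprivate to over-aligned variables, including an array
// of class type and a reference to class type.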
template <typename T>
T tmain() {
  S<T> test;
  SST<T> sst;
  T t_var __attribute__((aligned(128))) = T();
  T vec[] __attribute__((aligned(128))) = {1, 2};
  S<T> s_arr[] __attribute__((aligned(128))) = {1, 2};
  S<T> &var __attribute__((aligned(128))) = test;
#pragma omp parallel
#pragma omp for lastprivate(t_var, vec, s_arr, var)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  return T();
}

namespace A {
double x;
}
namespace B {
using A::x;
}

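// main covers lastprivate of globals (including a volatile int and a
// reference to it), a static local, namespace-qualified variables, a
// variable that is both firstprivate and lastprivate, and allocator-backed
// privates.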
int main() {
  static int sivar;
  SS ss(sivar);
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // LAMBDA: [[SIVAR:@.+]] = internal global i{{[0-9]+}} 0,
  // LAMBDA-LABEL: @main
  // LAMBDA: alloca [[SS_TY]],
  // LAMBDA: alloca [[CAP_TY:%.+]],
  // FIXME: The outer lambda should not capture 'sivar'; that capture is not
  // used for anything.
  // LAMBDA: store {{.*}}@_ZZ4mainE5sivar,
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]]([[CAP_TY]]*
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i32* @_ZZ4mainE5sivar)
#pragma omp parallel
#pragma omp for lastprivate(g, g1, sivar)
  for (int i = 0; i < 2; ++i) {
    // LAMBDA: define {{.+}} @{{.+}}([[SS_TY]]*
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
    // LAMBDA: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
    // LAMBDA: store i8
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
    // LAMBDA: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA-NOT: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
    // LAMBDA: call{{.*}} void [[SS_LAMBDA1:@[^ ]+]]
    // LAMBDA: call void @__kmpc_for_static_fini(%
    // LAMBDA: ret

    // LAMBDA: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
    // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 0
    // LAMBDA-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
    // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]*
    // LAMBDA: call{{.*}} void [[SS_LAMBDA:@[^ ]+]]
    // LAMBDA: call void @__kmpc_for_static_fini(
    // LAMBDA: br i1
    // LAMBDA: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
    // LAMBDA: store i8 %{{.+}}, i8* [[B_REF]],
    // LAMBDA: br label
    // LAMBDA: ret void

    // LAMBDA: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, i32* {{.+}}, i32* {{.+}}, i32* {{.+}})
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
    // LAMBDA: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
    // LAMBDA-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
    // LAMBDA-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
    // LAMBDA-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
    // LAMBDA-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
    // LAMBDA-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
    // LAMBDA-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
    // LAMBDA: call void @__kmpc_for_static_fini(
    // LAMBDA: br i1
    // LAMBDA: br label
    // LAMBDA: ret void

    // LAMBDA: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
    // LAMBDA: ret void

    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.+]])
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 128
    // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[G1_PRIVATE_REF:%.+]] = alloca i{{[0-9]+}}*,
    // LAMBDA: [[SIVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[SIVAR_PRIVATE_ADDR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}},

    // LAMBDA: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // LAMBDA: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]

    // LAMBDA: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // LAMBDA: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G1_PRIVATE_REF]],
    // LAMBDA: store volatile i{{[0-9]+}} 1, i{{[0-9]+}}* [[G1_PRIVATE_ADDR]],
    // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: [[G1_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
    // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G1_PRIVATE_REF]],
    // LAMBDA: store i{{[0-9]+}}* [[G1_PRIVATE_ADDR]], i{{[0-9]+}}** [[G1_PRIVATE_ADDR_REF]]
    // LAMBDA: [[SIVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
    // LAMBDA: store i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]], i{{[0-9]+}}** [[SIVAR_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g = 1;
    g1 = 1;
    sivar = 2;
    // Check for final copying of private values back to original vars.
    // LAMBDA: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // LAMBDA: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // LAMBDA: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // LAMBDA: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // LAMBDA: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],

    // original sivar=private_sivar;
    // LAMBDA: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // LAMBDA: store i{{[0-9]+}} [[SIVAR_VAL]], i{{[0-9]+}}* %{{.+}},
    // LAMBDA: br label %[[LAST_DONE]]
    // LAMBDA: [[LAST_DONE]]
    // LAMBDA: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      g1 = 2;
      sivar = 4;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]]
      // LAMBDA: [[G1_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
      // LAMBDA: [[G1_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G1_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G1_REF]]
      // LAMBDA: [[SIVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
      // LAMBDA: [[SIVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SIVAR_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp for lastprivate(g, g1, sivar)
  for (int i = 0; i < 2; ++i) {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.+]])
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 128
    // BLOCKS: [[G1_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 4
    // BLOCKS: [[SIVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: store i{{[0-9]+}}* [[SIVAR]], i{{[0-9]+}}** [[SIVAR_ADDR:%.+]],
    // BLOCKS: {{.+}} = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_ADDR]]
    // BLOCKS: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // BLOCKS: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // BLOCKS: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // BLOCKS: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g = 1;
    g1 = 1;
    sivar = 2;
    // Check for final copying of private values back to original vars.
    // BLOCKS: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // BLOCKS: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // BLOCKS: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // BLOCKS: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // BLOCKS: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],
    // BLOCKS: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // BLOCKS: store i{{[0-9]+}} [[SIVAR_VAL]], i{{[0-9]+}}* %{{.+}},
    // BLOCKS: br label %[[LAST_DONE]]
    // BLOCKS: [[LAST_DONE]]
    // BLOCKS: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    g = 1;
    g1 = 1;
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      g1 = 1;
      sivar = 4;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 4, i{{[0-9]+}}*
      // BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
// BLOCKS: define {{.+}} @{{.+}}([[SS_TY]]*
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// BLOCKS: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
// BLOCKS: store i8
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// BLOCKS: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
// BLOCKS: call void @__kmpc_for_static_init_4(
// BLOCKS-NOT: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// BLOCKS: call void
// BLOCKS: call void @__kmpc_for_static_fini(%
// BLOCKS: ret

// BLOCKS: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
// BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 0
// BLOCKS-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
// BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
// BLOCKS: call void @__kmpc_for_static_init_4(
// BLOCKS-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]*
// BLOCKS: call{{.*}} void
// BLOCKS: call void @__kmpc_for_static_fini(
// BLOCKS: br i1
// BLOCKS: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
// BLOCKS: store i8 %{{.+}}, i8* [[B_REF]],
// BLOCKS: br label
// BLOCKS: ret void

// BLOCKS: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, i32* {{.+}}, i32* {{.+}}, i32* {{.+}})
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
// BLOCKS: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
// BLOCKS: call void @__kmpc_for_static_init_4(
// BLOCKS: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
// BLOCKS-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
// BLOCKS-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
// BLOCKS-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
// BLOCKS-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
// BLOCKS-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
// BLOCKS-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
// BLOCKS-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
// BLOCKS-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
// BLOCKS-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
// BLOCKS-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
// BLOCKS: call void @__kmpc_for_static_fini(
// BLOCKS: br i1
// BLOCKS: br label
// BLOCKS: ret void
#else
  S<float> test;
  int t_var = 0;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3);
#pragma omp parallel
#pragma omp for lastprivate(t_var, vec, s_arr, var, sivar)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
    sivar += i;
  }
#pragma omp parallel
#pragma omp for lastprivate(A::x, B::x) firstprivate(f) lastprivate(f)
  for (int i = 0; i < 2; ++i) {
    A::x++;
  }
#pragma omp parallel
#pragma omp for allocate(omp_const_mem_alloc: f) firstprivate(f) lastprivate(f)
  for (int i = 0; i < 2; ++i) {
    A::x++;
  }
#pragma omp parallel
#pragma omp for allocate(omp_const_mem_alloc :cnt) lastprivate(cnt) lastprivate(CONDITIONAL f)
  for (cnt = 0; cnt < 2; ++cnt) {
    A::x++;
    f = 0;
  }
  return tmain<int>();
#endif
}

// CHECK: define i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]*, i32*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK1:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK2:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK3:@.+]] to void
// CHECK: = call {{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call void [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret

// CHECK: define internal void [[MAIN_MICROTASK]](i32* noalias [[GTID_ADDR:%.+]], i32* noalias %{{.+}}, i32* nonnull align 4 dereferenceable(4) %{{.+}}, [2 x i32]* nonnull align 4 dereferenceable(8) %{{.+}}, [2 x [[S_FLOAT_TY]]]* nonnull align 4 dereferenceable(8) %{{.+}}, [[S_FLOAT_TY]]* nonnull align 4 dereferenceable(4) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]],
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[SIVAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VEC_REF:%.+]] = load [2 x i32]*, [2 x i32]** %
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** %
// CHECK: [[VAR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %

// Check for default initialization.
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_FLOAT_TY]]*
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]])
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original t_var=private_t_var;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_REF]],

// original vec[]=private_vec[];
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_DEST]], i8* align {{[0-9]+}} [[VEC_SRC]],

// original s_arr[]=private_s_arr[];
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[S_ARR_PRIV]] to [[S_FLOAT_TY]]*
// CHECK: [[S_ARR_END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN:@.+]]([[S_FLOAT_TY]]* {{.+}}, [[S_FLOAT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %[[S_ARR_BODY_DONE]], label %[[S_ARR_BODY]]
// CHECK: [[S_ARR_BODY_DONE]]

// original var=private_var;
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN:@.+]]([[S_FLOAT_TY]]* [[VAR_REF]], [[S_FLOAT_TY]]* {{.*}} [[VAR_PRIV]])
// CHECK: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIV]],
// CHECK: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]
// CHECK-DAG: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

//
// CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK: [[F_PRIV:%.+]] = alloca float,
// CHECK-NOT: alloca float
// CHECK: [[X_PRIV:%.+]] = alloca double,
// CHECK-NOT: alloca float
// CHECK-NOT: alloca double

// Check for default initialization.
// CHECK-NOT: [[X_PRIV]]
// CHECK: [[F_VAL:%.+]] = load float, float* [[F]],
// CHECK: store float [[F_VAL]], float* [[F_PRIV]],
// CHECK-NOT: [[X_PRIV]]

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original x=private_x;
// CHECK: [[X_VAL:%.+]] = load double, double* [[X_PRIV]],
// CHECK: store double [[X_VAL]], double* [[X]],

// original f=private_f;
// CHECK: [[F_VAL:%.+]] = load float, float* [[F_PRIV]],
// CHECK: store float [[F_VAL]], float* [[F]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define internal void [[MAIN_MICROTASK2]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK-NOT: alloca float

// Check for default initialization.
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: [[F_VOID_PTR:%.+]] = call i8* @__kmpc_alloc(i32 [[GTID]], i64 4, i8* inttoptr (i64 3 to i8*))
// CHECK: [[F_PRIV:%.+]] = bitcast i8* [[F_VOID_PTR]] to float*
// CHECK: [[F_VAL:%.+]] = load float, float* [[F]],
// CHECK: store float [[F_VAL]], float* [[F_PRIV]],

// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original f=private_f;
// CHECK: [[F_VAL:%.+]] = load float, float* [[F_PRIV]],
// CHECK: store float [[F_VAL]], float* [[F]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK:      call void @__kmpc_free(i32 [[GTID]], i8* [[F_VOID_PTR]], i8* inttoptr (i64 3 to i8*))
// CHECK-NEXT: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK-NEXT: ret void

// CHECK: define internal void [[MAIN_MICROTASK3]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: [[CNT_PRIV:%.+]] = call i8* @__kmpc_alloc(i32 [[GTID]], i64 1, i8* inttoptr (i64 3 to i8*))
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// UB = min(UB, GlobalUB)
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 1
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 1, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// <Skip loop body>
// CHECK: store float 0.000000e+00, float* [[F_PRIV:%.+]],
// OMP50: call void @__kmpc_critical(%struct.ident_t* @{{.+}}, i32 [[GTID]], [8 x i32]* [[F_REGION:@.+]])
// OMP50: [[LAST_IV:%.+]] = load i32, i32* [[LAST_IV_F]],
// OMP50: [[CMP:%.+]] = icmp sle i32 [[LAST_IV]], [[IV:%.+]]
// OMP50: br i1 [[CMP]], label %[[LP_THEN:.+]], label %[[LP_DONE:[^,]+]]

// OMP50: [[LP_THEN]]:
// OMP50: store i32 [[IV]], i32* [[LAST_IV_F]],
// OMP50: [[F_VAL:%.+]] = load float, float* [[F_PRIV]],
// OMP50: store float [[F_VAL]], float* [[LAST_F]],
// OMP50: br label %[[LP_DONE]]

// OMP50: [[LP_DONE]]:
// OMP50: call void @__kmpc_end_critical(%struct.ident_t* @{{.+}}, i32 [[GTID]], [8 x i32]* [[F_REGION]])
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// OMP50-NEXT: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]

// Calculate private cnt value.
// CHECK: store i8 2, i8* [[CNT_PRIV]]
// original cnt=private_cnt;
// CHECK: [[CNT_VAL:%.+]] = load i8, i8* [[CNT_PRIV]],
// CHECK: store i8 [[CNT_VAL]], i8* [[CNT]],
// OMP50: [[F_VAL:%.+]] = load float, float* [[LAST_F]],
// OMP50: store float [[F_VAL]], float* [[F_PRIV]],
// CHECK: [[F_VAL:%.+]] = load float, float* [[F_PRIV]],
// CHECK: store float [[F_VAL]], float* [[F]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK:      call void @__kmpc_free(i32 [[GTID]], i8* [[CNT_PRIV]], i8* inttoptr (i64 3 to i8*))
// CHECK-NEXT: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK-NEXT: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret

// CHECK: define {{.+}} @{{.+}}([[SS_TY]]*
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
// CHECK: store i8
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK-NOT: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// CHECK: call void @__kmpc_for_static_fini(%
// CHECK: ret

// CHECK: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
// CHECK: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
// CHECK-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
// CHECK-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
// CHECK-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
// CHECK-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
// CHECK-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
// CHECK-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
// CHECK-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
// CHECK-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
// CHECK-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
// CHECK-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
// CHECK: call void @__kmpc_for_static_fini(
// CHECK: br i1
// CHECK: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
// CHECK: store i8 %{{.+}}, i8* [[B_REF]],
// CHECK: br label
// CHECK: ret void

// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i32* nonnull align 4 dereferenceable(4) %{{.+}}, [2 x i32]* nonnull align 4 dereferenceable(8) %{{.+}}, [2 x [[S_INT_TY]]]* nonnull align 4 dereferenceable(8) %{{.+}}, [[S_INT_TY]]* nonnull align 4 dereferenceable(4) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, align 128
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], align 128
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]], align 128
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]], align 128
// CHECK: [[VAR_PRIV_REF:%.+]] = alloca [[S_INT_TY]]*,
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** %
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** %

// Check for default initialization.
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_INT_TY]]*
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]])
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK: store [[S_INT_TY]]* [[VAR_PRIV]], [[S_INT_TY]]** [[VAR_PRIV_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original t_var=private_t_var;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_REF]],

// original vec[]=private_vec[];
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_DEST]], i8* align {{[0-9]+}} [[VEC_SRC]],

// original s_arr[]=private_s_arr[];
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]] to [[S_INT_TY]]*
// CHECK: [[S_ARR_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %[[S_ARR_BODY_DONE]], label %[[S_ARR_BODY]]
// CHECK: [[S_ARR_BODY_DONE]]

// original var=private_var;
// CHECK: [[VAR_PRIV1:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[VAR_PRIV_REF]],
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* {{.*}} [[VAR_PRIV1]])
// CHECK: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void
#endif