// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
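//
// Codegen test for the 'lastprivate' clause on '#pragma omp for': checks the
// IR emitted for scalars, arrays, class types, bit-fields, and references, in
// plain statements (CHECK), lambdas (LAMBDA), and blocks (BLOCKS), and checks
// that -fopenmp-simd emits no OpenMP runtime calls (SIMD-ONLY0).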
#ifndef HEADER
#define HEADER

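// 'SS' applies lastprivate to non-static data members, including a bit-field
// and a reference member, from inside a constructor; each worksharing loop is
// also wrapped in a lambda or a block to cover captured 'this'.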
struct SS {
  int a;
  int b : 4;
  int &c;
  SS(int &d) : a(0), b(0), c(d) {
#pragma omp parallel
#pragma omp for lastprivate(a, b, c)
    for (int i = 0; i < 2; ++i)
#ifdef LAMBDA
      [&]() {
        ++this->a, --b, (this)->c /= 1;
#pragma omp parallel
#pragma omp for lastprivate(a, b, c)
        for (int i = 0; i < 2; ++i)
          ++(this)->a, --b, this->c /= 1;
      }();
#elif defined(BLOCKS)
      ^{
        ++a;
        --this->b;
        (this)->c /= 1;
#pragma omp parallel
#pragma omp for lastprivate(a, b, c)
        for (int i = 0; i < 2; ++i)
          ++(this)->a, --b, this->c /= 1;
      }();
#else
      ++this->a, --b, c /= 1;
#endif
#pragma omp for
    for (a = 0; a < 2; ++a)
#ifdef LAMBDA
      [&]() {
        --this->a, ++b, (this)->c *= 2;
#pragma omp parallel
#pragma omp for lastprivate(b)
        for (b = 0; b < 2; ++b)
          ++(this)->a, --b, this->c /= 1;
      }();
#elif defined(BLOCKS)
      ^{
        ++a;
        --this->b;
        (this)->c /= 1;
#pragma omp parallel
#pragma omp for
        for (c = 0; c < 2; ++c)
          ++(this)->a, --b, this->c /= 1;
      }();
#else
      ++this->a, --b, c /= 1;
#endif
  }
};

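// 'SST' repeats the member-lastprivate pattern with a type-dependent member
// to cover template instantiation, including nested lambdas/blocks.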
template <typename T>
struct SST {
  T a;
  SST() : a(T()) {
#pragma omp parallel
#pragma omp for lastprivate(a)
    for (int i = 0; i < 2; ++i)
#ifdef LAMBDA
      [&]() {
        [&]() {
          ++this->a;
#pragma omp parallel
#pragma omp for lastprivate(a)
          for (int i = 0; i < 2; ++i)
            ++(this)->a;
        }();
      }();
#elif defined(BLOCKS)
      ^{
        ^{
          ++a;
#pragma omp parallel
#pragma omp for lastprivate(a)
          for (int i = 0; i < 2; ++i)
            ++(this)->a;
        }();
      }();
#else
      ++(this)->a;
#endif
#pragma omp for
    for (a = 0; a < 2; ++a)
#ifdef LAMBDA
      [&]() {
        ++this->a;
#pragma omp parallel
#pragma omp for
        for (a = 0; a < 2; ++(this)->a)
          ++(this)->a;
      }();
#elif defined(BLOCKS)
      ^{
        ++a;
#pragma omp parallel
#pragma omp for
        for (this->a = 0; a < 2; ++a)
          ++(this)->a;
      }();
#else
      ++(this)->a;
#endif
  }
};

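// Helper class with a user-declared copy assignment and destructor, so the
// checks below can look for the constructor, copy-assignment, and destructor
// calls that lastprivate must emit for class-typed variables.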
template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  S<T> &operator=(const S<T> &);
  operator T() { return T(); }
  ~S() {}
};

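// Global variables used as lastprivates below: an over-aligned volatile int,
// a reference to it, a float, and a char that also serves as a loop counter.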
volatile int g __attribute__((aligned(128))) = 1212;
volatile int &g1 = g;
float f;
char cnt;

// CHECK: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// LAMBDA: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// BLOCKS: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[S_INT_TY:%.+]] = type { i32 }
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[X:@.+]] = global double 0.0
// CHECK-DAG: [[F:@.+]] = global float 0.0
// CHECK-DAG: [[CNT:@.+]] = global i8 0
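// Instantiated with T = int from main(); all locals are over-aligned so the
// checks can verify that the 128-byte alignment carries over to the private
// copies.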
template <typename T>
T tmain() {
  S<T> test;
  SST<T> sst;
  T t_var __attribute__((aligned(128))) = T();
  T vec[] __attribute__((aligned(128))) = {1, 2};
  S<T> s_arr[] __attribute__((aligned(128))) = {1, 2};
  S<T> &var __attribute__((aligned(128))) = test;
#pragma omp parallel
#pragma omp for lastprivate(t_var, vec, s_arr, var)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  return T();
}

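// A::x and B::x name the same variable (via using-declaration); listing both
// in one lastprivate clause below must still produce a single private copy.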
namespace A {
double x;
}
namespace B {
using A::x;
}

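// main() drives three variants: under -DLAMBDA and -DBLOCKS the worksharing
// regions sit inside a lambda/block; otherwise the plain bodies feed the
// CHECK lines after this function.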
int main() {
  static int sivar;
  SS ss(sivar);
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // LAMBDA: [[SIVAR:@.+]] = internal global i{{[0-9]+}} 0,
  // LAMBDA-LABEL: @main
  // LAMBDA: alloca [[SS_TY]],
  // LAMBDA: alloca [[CAP_TY:%.+]],
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]]([[CAP_TY]]*
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i32* %{{.+}})
#pragma omp parallel
#pragma omp for lastprivate(g, g1, sivar)
  for (int i = 0; i < 2; ++i) {
    // LAMBDA: define {{.+}} @{{.+}}([[SS_TY]]*
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
    // LAMBDA: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
    // LAMBDA: store i8
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
    // LAMBDA: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA-NOT: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
    // LAMBDA: call{{.*}} void [[SS_LAMBDA1:@[^ ]+]]
    // LAMBDA: call void @__kmpc_for_static_fini(%
    // LAMBDA: ret

    // LAMBDA: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
    // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 0
    // LAMBDA-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
    // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]*
    // LAMBDA: call{{.*}} void [[SS_LAMBDA:@[^ ]+]]
    // LAMBDA: call void @__kmpc_for_static_fini(
    // LAMBDA: br i1
    // LAMBDA: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
    // LAMBDA: store i8 %{{.+}}, i8* [[B_REF]],
    // LAMBDA: br label
    // LAMBDA: ret void

    // LAMBDA: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, i32* {{.+}}, i32* {{.+}}, i32* {{.+}})
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
    // LAMBDA: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
    // LAMBDA-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
    // LAMBDA-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
    // LAMBDA-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
    // LAMBDA-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
    // LAMBDA-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
    // LAMBDA-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
    // LAMBDA: call void @__kmpc_for_static_fini(
    // LAMBDA: br i1
    // LAMBDA: br label
    // LAMBDA: ret void

    // LAMBDA: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
    // LAMBDA: ret void

    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* dereferenceable(4) [[SIVAR:%.+]])
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 128
    // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[G1_PRIVATE_REF:%.+]] = alloca i{{[0-9]+}}*,
    // LAMBDA: [[SIVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[SIVAR_PRIVATE_ADDR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}},

    // LAMBDA: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // LAMBDA: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]

    // LAMBDA: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // LAMBDA: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G1_PRIVATE_REF]],
    // LAMBDA: store volatile i{{[0-9]+}} 1, i{{[0-9]+}}* [[G1_PRIVATE_ADDR]],
    // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: [[G1_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
    // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G1_PRIVATE_REF]],
    // LAMBDA: store i{{[0-9]+}}* [[G1_PRIVATE_ADDR]], i{{[0-9]+}}** [[G1_PRIVATE_ADDR_REF]]
    // LAMBDA: [[SIVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
    // LAMBDA: store i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]], i{{[0-9]+}}** [[SIVAR_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g = 1;
    g1 = 1;
    sivar = 2;
    // Check for final copying of private values back to original vars.
    // LAMBDA: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // LAMBDA: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // LAMBDA: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // LAMBDA: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // LAMBDA: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],

    // original sivar=private_sivar;
    // LAMBDA: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // LAMBDA: store i{{[0-9]+}} [[SIVAR_VAL]], i{{[0-9]+}}* %{{.+}},
    // LAMBDA: br label %[[LAST_DONE]]
    // LAMBDA: [[LAST_DONE]]
    // LAMBDA: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      g1 = 2;
      sivar = 4;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]]
      // LAMBDA: [[G1_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
      // LAMBDA: [[G1_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G1_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G1_REF]]
      // LAMBDA: [[SIVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
      // LAMBDA: [[SIVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SIVAR_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp for lastprivate(g, g1, sivar)
  for (int i = 0; i < 2; ++i) {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* dereferenceable(4) [[SIVAR:%.+]])
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 128
    // BLOCKS: [[G1_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 4
    // BLOCKS: [[SIVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: store i{{[0-9]+}}* [[SIVAR]], i{{[0-9]+}}** [[SIVAR_ADDR:%.+]],
    // BLOCKS: {{.+}} = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_ADDR]]
    // BLOCKS: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // BLOCKS: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // BLOCKS: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // BLOCKS: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g = 1;
    g1 = 1;
    sivar = 2;
    // Check for final copying of private values back to original vars.
    // BLOCKS: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // BLOCKS: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // BLOCKS: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // BLOCKS: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // BLOCKS: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],
    // BLOCKS: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // BLOCKS: store i{{[0-9]+}} [[SIVAR_VAL]], i{{[0-9]+}}* %{{.+}},
    // BLOCKS: br label %[[LAST_DONE]]
    // BLOCKS: [[LAST_DONE]]
    // BLOCKS: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    g = 1;
    g1 = 1;
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      g1 = 1;
      sivar = 4;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 4, i{{[0-9]+}}*
      // BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
// BLOCKS: define {{.+}} @{{.+}}([[SS_TY]]*
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// BLOCKS: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
// BLOCKS: store i8
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// BLOCKS: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
// BLOCKS: call void @__kmpc_for_static_init_4(
// BLOCKS-NOT: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// BLOCKS: call void
// BLOCKS: call void @__kmpc_for_static_fini(%
// BLOCKS: ret

// BLOCKS: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
// BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 0
// BLOCKS-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
// BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
// BLOCKS: call void @__kmpc_for_static_init_4(
// BLOCKS-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]*
// BLOCKS: call{{.*}} void
// BLOCKS: call void @__kmpc_for_static_fini(
// BLOCKS: br i1
// BLOCKS: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
// BLOCKS: store i8 %{{.+}}, i8* [[B_REF]],
// BLOCKS: br label
// BLOCKS: ret void

// BLOCKS: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, i32* {{.+}}, i32* {{.+}}, i32* {{.+}})
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
// BLOCKS: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
// BLOCKS: call void @__kmpc_for_static_init_4(
// BLOCKS: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
// BLOCKS-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
// BLOCKS-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
// BLOCKS-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
// BLOCKS-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
// BLOCKS-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
// BLOCKS-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
// BLOCKS-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
// BLOCKS-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
// BLOCKS-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
// BLOCKS-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
// BLOCKS: call void @__kmpc_for_static_fini(
// BLOCKS: br i1
// BLOCKS: br label
// BLOCKS: ret void
#else
  S<float> test;
  int t_var = 0;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3);
#pragma omp parallel
#pragma omp for lastprivate(t_var, vec, s_arr, var, sivar)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
    sivar += i;
  }
#pragma omp parallel
#pragma omp for lastprivate(A::x, B::x) firstprivate(f) lastprivate(f)
  for (int i = 0; i < 2; ++i) {
    A::x++;
  }
#pragma omp parallel
#pragma omp for firstprivate(f) lastprivate(f)
  for (int i = 0; i < 2; ++i) {
    A::x++;
  }
#pragma omp parallel
#pragma omp for lastprivate(cnt)
  for (cnt = 0; cnt < 2; ++cnt) {
    A::x++;
  }
  return tmain<int>();
#endif
}

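// IR checks for the default (non-LAMBDA, non-BLOCKS) build follow.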
// CHECK: define i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]*, i32*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK1:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK2:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK3:@.+]] to void
// CHECK: = call {{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call void [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret

// CHECK: define internal void [[MAIN_MICROTASK]](i32* noalias [[GTID_ADDR:%.+]], i32* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}}, [2 x i32]* dereferenceable(8) %{{.+}}, [2 x [[S_FLOAT_TY]]]* dereferenceable(8) %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]],
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[SIVAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VEC_REF:%.+]] = load [2 x i32]*, [2 x i32]** %
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** %
// CHECK: [[VAR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %

// Check for default initialization.
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_FLOAT_TY]]*
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]])
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original t_var=private_t_var;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_REF]],

// original vec[]=private_vec[];
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_DEST]], i8* align {{[0-9]+}} [[VEC_SRC]],

// original s_arr[]=private_s_arr[];
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[S_ARR_PRIV]] to [[S_FLOAT_TY]]*
// CHECK: [[S_ARR_END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN:@.+]]([[S_FLOAT_TY]]* {{.+}}, [[S_FLOAT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %[[S_ARR_BODY_DONE]], label %[[S_ARR_BODY]]
// CHECK: [[S_ARR_BODY_DONE]]

// original var=private_var;
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN:@.+]]([[S_FLOAT_TY]]* [[VAR_REF]], [[S_FLOAT_TY]]* {{.*}} [[VAR_PRIV]])
// CHECK: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIV]],
// CHECK: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]
// CHECK-DAG: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

//
// CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK: [[F_PRIV:%.+]] = alloca float,
// CHECK-NOT: alloca float
// CHECK: [[X_PRIV:%.+]] = alloca double,
// CHECK-NOT: alloca float
// CHECK-NOT: alloca double

// Private x (lastprivate only) gets no initializer; private f (also
// firstprivate) is copy-initialized from the original.
// CHECK-NOT: [[X_PRIV]]
// CHECK: [[F_VAL:%.+]] = load float, float* [[F]],
// CHECK: store float [[F_VAL]], float* [[F_PRIV]],
// CHECK-NOT: [[X_PRIV]]

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original x=private_x;
// CHECK: [[X_VAL:%.+]] = load double, double* [[X_PRIV]],
// CHECK: store double [[X_VAL]], double* [[X]],

// original f=private_f;
// CHECK: [[F_VAL:%.+]] = load float, float* [[F_PRIV]],
// CHECK: store float [[F_VAL]], float* [[F]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define internal void [[MAIN_MICROTASK2]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK: [[F_PRIV:%.+]] = alloca float,
// CHECK-NOT: alloca float

// Check for firstprivate copy-initialization of f.
// CHECK: [[F_VAL:%.+]] = load float, float* [[F]],
// CHECK: store float [[F_VAL]], float* [[F_PRIV]],

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original f=private_f;
// CHECK: [[F_VAL:%.+]] = load float, float* [[F_PRIV]],
// CHECK: store float [[F_VAL]], float* [[F]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define internal void [[MAIN_MICROTASK3]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK: alloca i8,
// CHECK: [[CNT_PRIV:%.+]] = alloca i8,

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// UB = min(UB, GlobalUB)
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 1
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 1, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]

// Calculate private cnt value.
// CHECK: store i8 2, i8* [[CNT_PRIV]]
// original cnt=private_cnt;
// CHECK: [[CNT_VAL:%.+]] = load i8, i8* [[CNT_PRIV]],
// CHECK: store i8 [[CNT_VAL]], i8* [[CNT]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

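// IR checks for tmain<int>(), the SS/SST constructor helpers, and the
// instantiated microtasks follow.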
// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret

// CHECK: define {{.+}} @{{.+}}([[SS_TY]]*
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
// CHECK: store i8
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK-NOT: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// CHECK: call void @__kmpc_for_static_fini(%
// CHECK: ret

// CHECK: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
// CHECK: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
// CHECK-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
// CHECK-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
// CHECK-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
// CHECK-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
// CHECK-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
// CHECK-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
// CHECK-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
// CHECK-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
// CHECK-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
// CHECK-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
// CHECK: call void @__kmpc_for_static_fini(
// CHECK: br i1
// CHECK: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
// CHECK: store i8 %{{.+}}, i8* [[B_REF]],
// CHECK: br label
// CHECK: ret void

// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}}, [2 x i32]* dereferenceable(8) %{{.+}}, [2 x [[S_INT_TY]]]* dereferenceable(8) %{{.+}}, [[S_INT_TY]]* dereferenceable(4) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, align 128
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], align 128
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]], align 128
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]], align 128
// CHECK: [[VAR_PRIV_REF:%.+]] = alloca [[S_INT_TY]]*,
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** %
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** %

// Check for default initialization.
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_INT_TY]]*
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]])
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK: store [[S_INT_TY]]* [[VAR_PRIV]], [[S_INT_TY]]** [[VAR_PRIV_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original t_var=private_t_var;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_REF]],

// original vec[]=private_vec[];
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_DEST]], i8* align {{[0-9]+}} [[VEC_SRC]],

// original s_arr[]=private_s_arr[];
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]] to [[S_INT_TY]]*
// CHECK: [[S_ARR_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %[[S_ARR_BODY_DONE]], label %[[S_ARR_BODY]]
// CHECK: [[S_ARR_BODY_DONE]]

// original var=private_var;
// CHECK: [[VAR_PRIV1:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[VAR_PRIV_REF]],
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* {{.*}} [[VAR_PRIV1]])
// CHECK: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void
#endif
781