// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER
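
// This test verifies IR generation for '#pragma omp for' with the
// 'lastprivate' clause: each thread works on a private copy, the thread
// that executes the sequentially last iteration copies its value back to
// the original variable (guarded by the is-last flag set by
// __kmpc_for_static_init_4), and the construct ends with an implicit
// barrier (__kmpc_barrier). Plain, lambda, and blocks codepaths are
// checked, plus compilation through a PCH.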

template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  S<T> &operator=(const S<T> &);
  operator T() { return T(); }
  ~S() {}
};

volatile int g __attribute__((aligned(128))) = 1212;
volatile int &g1 = g;
float f;
char cnt;
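
// 'g' is over-aligned via aligned(128); the checks below verify that this
// alignment carries over to its private copy ('alloca ..., align 128').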

// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[S_INT_TY:%.+]] = type { i32 }
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[X:@.+]] = global double 0.0
// CHECK-DAG: [[F:@.+]] = global float 0.0
// CHECK-DAG: [[CNT:@.+]] = global i8 0
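
// The template version exercises lastprivate on a scalar, an array, an
// array of class type, and a reference, all over-aligned to 128 bytes;
// the tmain<int>() instantiation is checked as the TMAIN_MICROTASK
// function below.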
template <typename T>
T tmain() {
  S<T> test;
  T t_var __attribute__((aligned(128))) = T();
  T vec[] __attribute__((aligned(128))) = {1, 2};
  S<T> s_arr[] __attribute__((aligned(128))) = {1, 2};
  S<T> &var __attribute__((aligned(128))) = test;
#pragma omp parallel
#pragma omp for lastprivate(t_var, vec, s_arr, var)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  return T();
}

namespace A {
double x;
}
namespace B {
using A::x;
}
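
// B::x is a using declaration for A::x, so both names denote the same
// variable; listing both in one lastprivate clause is therefore valid and
// yields a single private copy (see the lone 'alloca double' below).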

int main() {
  static int sivar;
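  // 'sivar' has static storage duration, so it is not captured by value:
  // the outlined functions receive its address as an extra
  // 'dereferenceable(4)' argument, as the checks below expect.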
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // LAMBDA: [[SIVAR:@.+]] = internal global i{{[0-9]+}} 0,
  // LAMBDA-LABEL: @main
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i32* %{{.+}})
#pragma omp parallel
#pragma omp for lastprivate(g, g1, sivar)
  for (int i = 0; i < 2; ++i) {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* dereferenceable(4) [[SIVAR:%.+]])
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 128
    // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[SIVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[SIVAR_PRIVATE_ADDR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}},

    // LAMBDA: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // LAMBDA: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]

    // LAMBDA: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // LAMBDA: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: [[SIVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
    // LAMBDA: store i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]], i{{[0-9]+}}** [[SIVAR_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g = 1;
    g1 = 1;
    sivar = 2;
    // Check for final copying of private values back to original vars.
    // LAMBDA: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // LAMBDA: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // LAMBDA: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // LAMBDA: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // LAMBDA: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],

    // original sivar=private_sivar;
    // LAMBDA: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // LAMBDA: store i{{[0-9]+}} [[SIVAR_VAL]], i{{[0-9]+}}* %{{.+}},
    // LAMBDA: br label %[[LAST_DONE]]
    // LAMBDA: [[LAST_DONE]]
    // LAMBDA: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
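    // The inner lambda captures g, g1 and sivar by reference; inside the
    // OpenMP region those captures are wired to the private copies, so the
    // stores below must go through the pointers stashed in the capture
    // struct rather than to the original globals.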
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      g1 = 2;
      sivar = 4;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]]
      // LAMBDA: [[SIVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
      // LAMBDA: [[SIVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SIVAR_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp for lastprivate(g, g1, sivar)
  for (int i = 0; i < 2; ++i) {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* dereferenceable(4) [[SIVAR:%.+]])
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 128
    // BLOCKS: [[G1_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 4
    // BLOCKS: [[SIVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: store i{{[0-9]+}}* [[SIVAR]], i{{[0-9]+}}** [[SIVAR_ADDR:%.+]],
    // BLOCKS: {{.+}} = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_ADDR]]
    // BLOCKS: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // BLOCKS: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // BLOCKS: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // BLOCKS: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[^[:word:]]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[^[:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g = 1;
    g1 = 1;
    sivar = 2;
    // Check for final copying of private values back to original vars.
    // BLOCKS: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // BLOCKS: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // BLOCKS: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // BLOCKS: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // BLOCKS: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],
    // BLOCKS: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // BLOCKS: store i{{[0-9]+}} [[SIVAR_VAL]], i{{[0-9]+}}* %{{.+}},
    // BLOCKS: br label %[[LAST_DONE]]
    // BLOCKS: [[LAST_DONE]]
    // BLOCKS: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    g = 1;
    g1 = 1;
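    // The block again captures the variables; stores inside it must hit
    // the private copies, never the original globals, which is what the
    // BLOCKS-NOT lines below guard against.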
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      g1 = 1;
      sivar = 4;
      // BLOCKS-NOT: [[G]]{{[^[:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[^[:word:]]}}
      // BLOCKS-NOT: [[SIVAR]]{{[^[:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 4, i{{[0-9]+}}*
      // BLOCKS-NOT: [[SIVAR]]{{[^[:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
#else
  S<float> test;
  int t_var = 0;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3);
#pragma omp parallel
#pragma omp for lastprivate(t_var, vec, s_arr, var, sivar)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
    sivar += i;
  }
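  // 'f' appears in both firstprivate and lastprivate on the next two
  // constructs: its private copy is initialized from the original value up
  // front and copied back on the last iteration (checked in
  // MAIN_MICROTASK1 and MAIN_MICROTASK2 below).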
#pragma omp parallel
#pragma omp for lastprivate(A::x, B::x) firstprivate(f) lastprivate(f)
  for (int i = 0; i < 2; ++i) {
    A::x++;
  }
#pragma omp parallel
#pragma omp for firstprivate(f) lastprivate(f)
  for (int i = 0; i < 2; ++i) {
    A::x++;
  }
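  // 'cnt' is the loop iteration variable, so its lastprivate value is not
  // whatever the body stored but is recomputed from the iteration space on
  // the last iteration (see the trunc/store sequence in MAIN_MICROTASK3).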
#pragma omp parallel
#pragma omp for lastprivate(cnt)
  for (cnt = 0; cnt < 2; ++cnt) {
    A::x++;
  }
  return tmain<int>();
#endif
}

// CHECK: define i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]*, i32*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK1:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK2:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK3:@.+]] to void
// CHECK: = call {{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call void [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret

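// Outline for 'main': five loop-control allocas come first, then the
// private copies; the class-type privates are default-constructed before
// __kmpc_for_static_init_4, the copy-back happens under the is-last check,
// and the destructors and implicit barrier run afterwards.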
// CHECK: define internal void [[MAIN_MICROTASK]](i32* noalias [[GTID_ADDR:%.+]], i32* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}}, [2 x i32]* dereferenceable(8) %{{.+}}, [2 x [[S_FLOAT_TY]]]* dereferenceable(8) %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]],
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[SIVAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VEC_REF:%.+]] = load [2 x i32]*, [2 x i32]** %
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** %
// CHECK: [[VAR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %

// Check for default initialization.
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_FLOAT_TY]]*
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]])
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original t_var=private_t_var;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_REF]],

// original vec[]=private_vec[];
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[VEC_DEST]], i8* [[VEC_SRC]],

// original s_arr[]=private_s_arr[];
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[S_ARR_PRIV]] to [[S_FLOAT_TY]]*
// CHECK: [[S_ARR_END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN:@.+]]([[S_FLOAT_TY]]* {{.+}}, [[S_FLOAT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %[[S_ARR_BODY_DONE]], label %[[S_ARR_BODY]]
// CHECK: [[S_ARR_BODY_DONE]]

// original var=private_var;
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN]]([[S_FLOAT_TY]]* [[VAR_REF]], [[S_FLOAT_TY]]* {{.*}} [[VAR_PRIV]])
// CHECK: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIV]],
// CHECK: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]
// CHECK-DAG: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

//
// CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK: [[F_PRIV:%.+]] = alloca float,
// CHECK-NOT: alloca float
// CHECK: [[X_PRIV:%.+]] = alloca double,
// CHECK-NOT: alloca float
// CHECK-NOT: alloca double

// Check initialization of the private copies: 'f' is initialized from the
// original value (firstprivate), 'x' gets no initializer.
// CHECK-NOT: [[X_PRIV]]
// CHECK: [[F_VAL:%.+]] = load float, float* [[F]],
// CHECK: store float [[F_VAL]], float* [[F_PRIV]],
// CHECK-NOT: [[X_PRIV]]

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original x=private_x;
// CHECK: [[X_VAL:%.+]] = load double, double* [[X_PRIV]],
// CHECK: store double [[X_VAL]], double* [[X]],

// original f=private_f;
// CHECK: [[F_VAL:%.+]] = load float, float* [[F_PRIV]],
// CHECK: store float [[F_VAL]], float* [[F]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define internal void [[MAIN_MICROTASK2]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK: [[F_PRIV:%.+]] = alloca float,
// CHECK-NOT: alloca float

// 'f' is firstprivate: the private copy is initialized from the original.
// CHECK: [[F_VAL:%.+]] = load float, float* [[F]],
// CHECK: store float [[F_VAL]], float* [[F_PRIV]],

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original f=private_f;
// CHECK: [[F_VAL:%.+]] = load float, float* [[F_PRIV]],
// CHECK: store float [[F_VAL]], float* [[F]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define internal void [[MAIN_MICROTASK3]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK: [[CNT_PRIV:%.+]] = alloca i8,

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// UB = min(UB, GlobalUB)
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 1
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 1, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]

// Calculate last iter count.
// CHECK: store i32 1, i32* [[OMP_IV]]
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 [[IV1_1]], 1
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[OMP_IV]]
// Actual copying.

// original cnt=private_cnt;
// Calculate private cnt value.
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[MUL:%.+]] = mul nsw i32 [[IV1_1]], 1
// CHECK: [[ADD:%.+]] = add nsw i32 0, [[MUL]]
// CHECK: [[CONV:%.+]] = trunc i32 [[ADD]] to i8
// CHECK: store i8 [[CONV]], i8* [[CNT_PRIV]]
// CHECK: [[CNT_VAL:%.+]] = load i8, i8* [[CNT_PRIV]],
// CHECK: store i8 [[CNT_VAL]], i8* [[CNT]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
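// In the instantiation, the lastprivate 'var' is a reference: the outlined
// function materializes a private S<int>, keeps its address in a dedicated
// pointer alloca, and copies back through that pointer on the last
// iteration.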
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}}, [2 x i32]* dereferenceable(8) %{{.+}}, [2 x [[S_INT_TY]]]* dereferenceable(8) %{{.+}}, [[S_INT_TY]]* dereferenceable(4) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, align 128
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], align 128
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]], align 128
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]], align 128
// CHECK: [[VAR_PRIV_REF:%.+]] = alloca [[S_INT_TY]]*,
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** %
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** %

// Check for default initialization.
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_INT_TY]]*
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]])
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK: store [[S_INT_TY]]* [[VAR_PRIV]], [[S_INT_TY]]** [[VAR_PRIV_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original t_var=private_t_var;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_REF]],

// original vec[]=private_vec[];
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[VEC_DEST]], i8* [[VEC_SRC]],

// original s_arr[]=private_s_arr[];
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]] to [[S_INT_TY]]*
// CHECK: [[S_ARR_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %[[S_ARR_BODY_DONE]], label %[[S_ARR_BODY]]
// CHECK: [[S_ARR_BODY_DONE]]

// original var=private_var;
// CHECK: [[VAR_PRIV1:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[VAR_PRIV_REF]],
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* {{.*}} [[VAR_PRIV1]])
// CHECK: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void
#endif