// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// expected-no-diagnostics
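// The RUN lines above cover four scenarios: plain codegen checked with the
// CHECK prefix, the same checks after building and then including a PCH, a
// C++11 lambda variant (LAMBDA), and an Objective-C blocks variant (BLOCKS).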
#ifndef HEADER
#define HEADER

volatile double g;

template <class T>
struct S {
  T f;
  S(T a) : f(a + g) {}
  S() : f(g) {}
  operator T() { return T(); }
  S &operator&(const S &) { return *this; }
  ~S() {}
};
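// S supplies every hook the reductions below exercise: S(T) and S() run when
// the private copies are created, operator& is the user-defined combiner for
// reduction(&:var), operator T() provides the boolean test used by
// reduction(&&:var1), and ~S() accounts for the destructor calls on the
// private copies that the checks look for.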

// CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
// CHECK-DAG: [[ATOMIC_REDUCE_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
// CHECK-DAG: [[REDUCTION_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
// CHECK-DAG: [[REDUCTION_LOCK:@.+]] = common global [8 x i32] zeroinitializer

template <typename T>
T tmain() {
  T t;
  S<T> test;
  T t_var = T(), t_var1;
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var(3), var1;
#pragma omp parallel
#pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1) nowait
  {
    vec[0] = t_var;
#pragma omp section
    s_arr[0] = var;
  }
  return T();
}
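// A rough sketch of the runtime protocol the checks in this file verify for
// the nowait variant above (pseudo-code, not the exact emitted IR):
//
//   <create and initialize private copies of t_var, var, var1, t_var1>
//   __kmpc_for_static_init_4(...); // sections dispatch, like a static loop
//   ... section bodies operating on the private copies ...
//   __kmpc_for_static_fini(...);
//   void *RedList[4] = {&t_var_priv, &var_priv, &var1_priv, &t_var1_priv};
//   switch (__kmpc_reduce_nowait(loc, gtid, 4, sizeof(RedList), RedList,
//                                reduce_func, &lock)) {
//   case 1: // combine non-atomically, then signal completion
//     <t_var += t_var_priv; var = var & var_priv; ...>
//     __kmpc_end_reduce_nowait(loc, gtid, &lock);
//     break;
//   case 2: // combine atomically (atomicrmw or a critical section)
//     break;
//   }
//
// Without the nowait clause the same pattern uses __kmpc_reduce and
// __kmpc_end_reduce instead, as the LAMBDA and BLOCKS checks below show.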

int main() {
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global double
  // LAMBDA-LABEL: @main
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp sections reduction(+:g)
    {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double,

    // Reduction list for runtime.
    // LAMBDA: [[RED_LIST:%.+]] = alloca [1 x i8*],

    // LAMBDA: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: call void @__kmpc_for_static_init_4(
    g = 1;
    // LAMBDA: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store double* [[G_PRIVATE_ADDR]], double** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(

    // LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i64 0, i64 0
    // LAMBDA: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
    // LAMBDA: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
    // LAMBDA: call i32 @__kmpc_reduce(
    // LAMBDA: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
    // LAMBDA: i32 1, label %[[CASE1:.+]]
    // LAMBDA: i32 2, label %[[CASE2:.+]]
    // LAMBDA: [[CASE1]]
    // LAMBDA: [[G_VAL:%.+]] = load double, double* [[G]]
    // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: [[ADD:%.+]] = fadd double [[G_VAL]], [[G_PRIV_VAL]]
    // LAMBDA: store double [[ADD]], double* [[G]]
    // LAMBDA: call void @__kmpc_end_reduce(
    // LAMBDA: br label %[[REDUCTION_DONE]]
    // LAMBDA: [[CASE2]]
    // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: fadd double
    // LAMBDA: cmpxchg i64*
    // LAMBDA: call void @__kmpc_end_reduce(
    // LAMBDA: br label %[[REDUCTION_DONE]]
    // LAMBDA: [[REDUCTION_DONE]]
    // LAMBDA: ret void
#pragma omp section
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]]
      // LAMBDA: store double 2.0{{.+}}, double* [[G_REF]]
    }();
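    // Note that the inner lambda captures g by reference, but the capture
    // record is filled with [[G_PRIVATE_ADDR]] (see the store above), so the
    // store of 2.0 lands in the thread-private copy rather than the global.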
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global double
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp sections reduction(-:g)
    {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca double,

    // Reduction list for runtime.
    // BLOCKS: [[RED_LIST:%.+]] = alloca [1 x i8*],

    // BLOCKS: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
    g = 1;
    // BLOCKS: call void @__kmpc_for_static_init_4(
    // BLOCKS: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[^[:alnum:]_]}}
    // BLOCKS: double* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[^[:alnum:]_]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(

    // BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i64 0, i64 0
    // BLOCKS: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
    // BLOCKS: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
    // BLOCKS: call i32 @__kmpc_reduce(
    // BLOCKS: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
    // BLOCKS: i32 1, label %[[CASE1:.+]]
    // BLOCKS: i32 2, label %[[CASE2:.+]]
    // BLOCKS: [[CASE1]]
    // BLOCKS: [[G_VAL:%.+]] = load double, double* [[G]]
    // BLOCKS: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // BLOCKS: [[ADD:%.+]] = fadd double [[G_VAL]], [[G_PRIV_VAL]]
    // BLOCKS: store double [[ADD]], double* [[G]]
    // BLOCKS: call void @__kmpc_end_reduce(
    // BLOCKS: br label %[[REDUCTION_DONE]]
    // BLOCKS: [[CASE2]]
    // BLOCKS: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // BLOCKS: fadd double
    // BLOCKS: cmpxchg i64*
    // BLOCKS: call void @__kmpc_end_reduce(
    // BLOCKS: br label %[[REDUCTION_DONE]]
    // BLOCKS: [[REDUCTION_DONE]]
    // BLOCKS: ret void
#pragma omp section
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      // BLOCKS-NOT: [[G]]{{[^[:alnum:]_]}}
      // BLOCKS: store double 2.0{{.+}}, double*
      // BLOCKS-NOT: [[G]]{{[^[:alnum:]_]}}
      // BLOCKS: ret
    }();
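    // As in the lambda variant, the block's captured reference to g is bound
    // to the thread-private copy; the BLOCKS-NOT lines guard against any
    // direct use of the global [[G]] inside the region.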
  }
  }();
  return 0;
#else
  S<float> test;
  float t_var = 0, t_var1;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3), var1;
#pragma omp parallel
#pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
  {
    {
    vec[0] = t_var;
    s_arr[0] = var;
    vec[1] = t_var1;
    s_arr[1] = var1;
    }
  }
  return tmain<int>();
#endif
}

// CHECK: define {{.*}}i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, float*, [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]*, float*, [2 x i32]*, [2 x [[S_FLOAT_TY]]]*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK: alloca float,
// CHECK: alloca [[S_FLOAT_TY]],
// CHECK: alloca [[S_FLOAT_TY]],
// CHECK: alloca float,

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]

// CHECK-NOT: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK-NOT: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*

// CHECK: call void @__kmpc_for_static_init_4(
// CHECK: call void @__kmpc_for_static_fini(

// CHECK: call void @__kmpc_barrier(

// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [[S_INT_TY]]*, [[S_INT_TY]]*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call {{.*}} [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[T_VAR1_PRIV:%.+]] = alloca i{{[0-9]+}},

// Reduction list for runtime.
// CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*],

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: [[VAR1_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: [[T_VAR1_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %

// For the + reduction operation the initial value of the private variable is 0.
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[T_VAR_PRIV]],

// For the & reduction operation the initial value of the private variable has all bits set.
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR_PRIV]])

// For the && reduction operation the initial value of the private variable is 1.
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR1_PRIV]])

// For the min reduction operation the initial value of the private variable is the largest representable value.
// CHECK: store i{{[0-9]+}} 2147483647, i{{[0-9]+}}* [[T_VAR1_PRIV]],
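// Taken together, the private-copy setup is roughly equivalent to the
// following (a sketch; the class-typed copies are default-constructed rather
// than assigned):
//
//   int t_var_priv = 0;           // identity for +
//   S<int> var_priv;              // identity for & supplied by S()
//   S<int> var1_priv;             // identity for && supplied by S()
//   int t_var1_priv = INT_MAX;    // 2147483647, identity for min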

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_for_static_init_4(
// Skip checks for internal operations.
// CHECK: call void @__kmpc_for_static_fini(

// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};

// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],

// res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);

// CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8*
// CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 4, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]])

// switch(res)
// CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [
// CHECK: i32 1, label %[[CASE1:.+]]
// CHECK: i32 2, label %[[CASE2:.+]]
// CHECK: ]

// case 1:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_REF]],
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_VAL]], [[T_VAR_PRIV_VAL]]
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_REF]],

// var = var.operator &(var_reduction);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1 = var1.operator &&(var1_reduction);
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK:  call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: [[T_VAR1_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_REF]],
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]],
// CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_VAL]], [[T_VAR1_PRIV_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi i32
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_REF]],

// __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);
// CHECK: call void @__kmpc_end_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])

// break;
// CHECK: br label %[[RED_DONE]]

// case 2:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]]
// CHECK: atomicrmw add i32* [[T_VAR_REF]], i32 [[T_VAR_PRIV_VAL]] monotonic
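// Only the scalar reductions map onto single atomicrmw instructions; the
// class-typed combiners below fall back to a __kmpc_critical /
// __kmpc_end_critical pair around the ordinary combine code.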

// var = var.operator &(var_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// var1 = var1.operator &&(var1_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK:  call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]]
// CHECK: atomicrmw min i32* [[T_VAR1_REF]], i32 [[T_VAR1_PRIV_VAL]] monotonic

// break;
// CHECK: br label %[[RED_DONE]]
// CHECK: [[RED_DONE]]
// CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: ret void

// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
//  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
//  ...
//  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
//  *(Type<n>-1*)rhs[<n>-1]);
// }
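// Specialized for this directive, reduce_func is roughly (a sketch; the
// class-typed combines go through operator& / operator T() as above):
//
//   static void reduce_func(void *lhs[4], void *rhs[4]) {
//     *(int *)lhs[0] += *(int *)rhs[0];                           // +
//     *(S<int> *)lhs[1] = *(S<int> *)lhs[1] & *(S<int> *)rhs[1];  // &
//     *(S<int> *)lhs[2] = *(S<int> *)lhs[2] && *(S<int> *)rhs[2]; // &&
//     *(int *)lhs[3] = std::min(*(int *)lhs[3], *(int *)rhs[3]);  // min
//   }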
// CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
// t_var_rhs = (i{{[0-9]+}}*)rhs[0];
// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
// CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to i{{[0-9]+}}*
// t_var_lhs = (i{{[0-9]+}}*)lhs[0];
// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
// CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to i{{[0-9]+}}*

// var_rhs = (S<i{{[0-9]+}}>*)rhs[1];
// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1
// CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
// CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_INT_TY]]*
// var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
// CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
// CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_INT_TY]]*

// var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];
// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
// CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
// CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_INT_TY]]*
// var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
// CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
// CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_INT_TY]]*

// t_var1_rhs = (i{{[0-9]+}}*)rhs[3];
// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3
// CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
// CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to i{{[0-9]+}}*
// t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
// CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
// CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to i{{[0-9]+}}*

// t_var_lhs += t_var_rhs;
// CHECK: [[T_VAR_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_LHS]],
// CHECK: [[T_VAR_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_RHS]],
// CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_LHS_VAL]], [[T_VAR_RHS_VAL]]
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_LHS]],

// var_lhs = var_lhs.operator &(var_rhs);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_LHS]], [[S_INT_TY]]* dereferenceable(4) [[VAR_RHS]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1_lhs = var1_lhs.operator &&(var1_rhs);
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_LHS]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_RHS]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK:  call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
// CHECK: [[T_VAR1_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_LHS]],
// CHECK: [[T_VAR1_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_RHS]],
// CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_LHS_VAL]], [[T_VAR1_RHS_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi i32
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_LHS]],
// CHECK: ret void

#endif