// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER

volatile double g;

template <class T>
struct S {
  T f;
  S(T a) : f(a + g) {}
  S() : f(g) {}
  operator T() { return T(); }
  S &operator&(const S &) { return *this; }
  ~S() {}
};

// CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
// CHECK-DAG: [[ATOMIC_REDUCE_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[SINGLE_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 322, i32 0, i32 0, i8*
// CHECK-DAG: [[REDUCTION_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
// CHECK-DAG: [[REDUCTION_LOCK:@.+]] = common global [8 x i32] zeroinitializer
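//
// The location globals matched above are kmpc 'ident_t' records; as a rough
// reference (a sketch of the runtime-side layout, not something this test
// checks) they correspond to:
//   struct ident_t {
//     int32_t reserved_1;
//     int32_t flags;      // the second field (18, 66, 322) distinguishes the
//                         // reduce/barrier call sites checked below
//     int32_t reserved_2;
//     int32_t reserved_3;
//     const char *psource;
//   };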

template <typename T>
T tmain() {
  T t;
  S<T> test;
  T t_var = T(), t_var1;
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var(3), var1;
#pragma omp parallel
#pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1) nowait
  {
    vec[0] = t_var;
#pragma omp section
    s_arr[0] = var;
  }
  return T();
}
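
// A rough outline of how the parallel sections region above is lowered (this
// is a sketch of what the [[TMAIN_MICROTASK]] CHECK lines further down
// verify, not additional code compiled by this test):
//   void <outlined microtask>(int *gtid, int *btid, T *t_var, S<T> *var,
//                             S<T> *var1, T *t_var1, ...) {
//     T t_var_priv = 0;                 // identity of '+'
//     S<T> var_priv, var1_priv;         // default-constructed private copies
//     T t_var1_priv = <max of T>;       // identity of 'min'
//     __kmpc_for_static_init_4(...);    // the sections become a static loop
//     /* run the assigned '#pragma omp section' bodies on the private copies */
//     __kmpc_for_static_fini(...);
//     void *red_list[4] = {&t_var_priv, &var_priv, &var1_priv, &t_var1_priv};
//     switch (__kmpc_reduce_nowait(<loc>, gtid, 4, sizeof(red_list), red_list,
//                                  reduce_func, &<lock>)) {
//     case 1: /* combine into the originals */
//       __kmpc_end_reduce_nowait(<loc>, gtid, &<lock>); break;
//     case 2: /* atomic/critical combination per item */ break;
//     }
//   }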

int main() {
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global double
  // LAMBDA-LABEL: @main
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp sections reduction(+:g)
    {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double,

    // Reduction list for runtime.
    // LAMBDA: [[RED_LIST:%.+]] = alloca [1 x i8*],

    // LAMBDA: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: call void @__kmpc_for_static_init_4(
    g = 1;
    // LAMBDA: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store double* [[G_PRIVATE_ADDR]], double** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(

    // LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i64 0, i64 0
    // LAMBDA: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
    // LAMBDA: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
    // LAMBDA: call i32 @__kmpc_reduce(
    // LAMBDA: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
    // LAMBDA: i32 1, label %[[CASE1:.+]]
    // LAMBDA: i32 2, label %[[CASE2:.+]]
    // LAMBDA: [[CASE1]]
    // LAMBDA: [[G_VAL:%.+]] = load double, double* [[G]]
    // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: [[ADD:%.+]] = fadd double [[G_VAL]], [[G_PRIV_VAL]]
    // LAMBDA: store double [[ADD]], double* [[G]]
    // LAMBDA: call void @__kmpc_end_reduce(
    // LAMBDA: br label %[[REDUCTION_DONE]]
    // LAMBDA: [[CASE2]]
    // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: fadd double
    // LAMBDA: cmpxchg i64*
    // LAMBDA: call void @__kmpc_end_reduce(
    // LAMBDA: br label %[[REDUCTION_DONE]]
    // LAMBDA: [[REDUCTION_DONE]]
    // LAMBDA: ret void
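    // The CASE2 block above is the atomic fallback for the double reduction:
    // conceptually a compare-exchange loop on the bit pattern of 'g'
    // (a sketch, not the literal IR that is emitted):
    //   double old = g, desired;
    //   do {
    //     desired = old + g_private;
    //   } while (!cmpxchg((i64 *)&g, /*expected*/ &old, bitcast<i64>(desired)));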
#pragma omp section
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]]
      // LAMBDA: store double 2.0{{.+}}, double* [[G_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global double
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp sections reduction(-:g)
    {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca double,

    // Reduction list for runtime.
    // BLOCKS: [[RED_LIST:%.+]] = alloca [1 x i8*],

    // BLOCKS: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
    g = 1;
    // BLOCKS: call void @__kmpc_for_static_init_4(
    // BLOCKS: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: double* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(

    // BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i64 0, i64 0
    // BLOCKS: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
    // BLOCKS: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
    // BLOCKS: call i32 @__kmpc_reduce(
    // BLOCKS: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
    // BLOCKS: i32 1, label %[[CASE1:.+]]
    // BLOCKS: i32 2, label %[[CASE2:.+]]
    // BLOCKS: [[CASE1]]
    // BLOCKS: [[G_VAL:%.+]] = load double, double* [[G]]
    // BLOCKS: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // BLOCKS: [[ADD:%.+]] = fadd double [[G_VAL]], [[G_PRIV_VAL]]
    // BLOCKS: store double [[ADD]], double* [[G]]
    // BLOCKS: call void @__kmpc_end_reduce(
    // BLOCKS: br label %[[REDUCTION_DONE]]
    // BLOCKS: [[CASE2]]
    // BLOCKS: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // BLOCKS: fadd double
    // BLOCKS: cmpxchg i64*
    // BLOCKS: call void @__kmpc_end_reduce(
    // BLOCKS: br label %[[REDUCTION_DONE]]
    // BLOCKS: [[REDUCTION_DONE]]
    // BLOCKS: ret void
#pragma omp section
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: store double 2.0{{.+}}, double*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
#else
  S<float> test;
  float t_var = 0, t_var1;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3), var1;
#pragma omp parallel
#pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
  {
    {
    vec[0] = t_var;
    s_arr[0] = var;
    vec[1] = t_var1;
    s_arr[1] = var1;
    }
  }
  return tmain<int>();
#endif
}

// CHECK: define {{.*}}i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, float*, [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]*, float*, [2 x i32]*, [2 x [[S_FLOAT_TY]]]*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK-NOT: alloca float,
// CHECK-NOT: alloca [[S_FLOAT_TY]],
// CHECK-NOT: alloca [[S_FLOAT_TY]],
// CHECK-NOT: alloca float,

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call i32 @__kmpc_single(

// CHECK-NOT: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK-NOT: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*

// CHECK: call void @__kmpc_end_single(

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[SINGLE_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])

// CHECK: ret void
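
// The sections region in main() contains no explicit '#pragma omp section',
// so it has exactly one implicit section and is emitted like a 'single'
// construct; roughly (a sketch, not additional code compiled by this test):
//   if (__kmpc_single(loc, gtid)) {
//     /* the one section body, operating on the private reduction copies */
//     __kmpc_end_single(loc, gtid);
//   }
//   __kmpc_barrier(<single barrier loc>, gtid);
//   __kmpc_barrier(<implicit barrier loc>, gtid);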

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [[S_INT_TY]]*, [[S_INT_TY]]*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call {{.*}} [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[T_VAR1_PRIV:%.+]] = alloca i{{[0-9]+}},

// Reduction list for runtime.
// CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*],

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: [[VAR1_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: [[T_VAR1_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %

// For the + reduction operation the initial value of the private variable is 0.
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[T_VAR_PRIV]],

// For the & reduction operation the initial value of the private variable is all-ones
// (for this class type the default constructor is called instead).
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR_PRIV]])

// For the && reduction operation the initial value of the private variable is 1
// (again provided by the default constructor for this class type).
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR1_PRIV]])

// For the min reduction operation the initial value of the private variable is the
// largest representable value (INT_MAX for i32).
// CHECK: store i{{[0-9]+}} 2147483647, i{{[0-9]+}}* [[T_VAR1_PRIV]],

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_for_static_init_4(
// Skip checks for internal operations.
// CHECK: call void @__kmpc_for_static_fini(

// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};

// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],

// res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);

// CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8*
// CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 4, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]])

// switch(res)
// CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [
// CHECK: i32 1, label %[[CASE1:.+]]
// CHECK: i32 2, label %[[CASE2:.+]]
// CHECK: ]
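
// The return value of __kmpc_reduce_nowait selects how the private copies are
// combined into the originals (a sketch of the protocol the cases below
// implement; the runtime's decision logic itself is not checked here):
//   case 1: this thread performs the combination directly and then calls
//           __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);
//   case 2: the combination must be done atomically, so each list item is
//           updated via atomicrmw or inside a __kmpc_critical region;
//   default: nothing to do on this thread.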

// case 1:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_REF]],
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_VAL]], [[T_VAR_PRIV_VAL]]
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_REF]],

// var = var.operator &(var_reduction);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1 = var1.operator &&(var1_reduction);
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK:  call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: [[T_VAR1_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_REF]],
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]],
// CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_VAL]], [[T_VAR1_PRIV_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi i32
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_REF]],

// __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);
// CHECK: call void @__kmpc_end_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])

// break;
// CHECK: br label %[[RED_DONE]]

// case 2:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]]
// CHECK: atomicrmw add i32* [[T_VAR_REF]], i32 [[T_VAR_PRIV_VAL]] monotonic

// var = var.operator &(var_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// var1 = var1.operator &&(var1_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK:  call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]]
// CHECK: atomicrmw min i32* [[T_VAR1_REF]], i32 [[T_VAR1_PRIV_VAL]] monotonic

// break;
// CHECK: br label %[[RED_DONE]]
// CHECK: [[RED_DONE]]
// CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
//  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
//  ...
//  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
//  *(Type<n>-1*)rhs[<n>-1]);
// }
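//
// For this specific test the generated [[REDUCTION_FUNC]] conceptually
// expands to the following (a sketch of what the CHECK lines below verify;
// the real function receives two i8* reduction lists and casts each slot):
//   static void reduce_func(void *lhs[4], void *rhs[4]) {
//     *(int *)lhs[0] = *(int *)lhs[0] + *(int *)rhs[0];                     // +   : t_var
//     *(S<int> *)lhs[1] = ((S<int> *)lhs[1])->operator&(*(S<int> *)rhs[1]); // &   : var
//     *(S<int> *)lhs[2] = S<int>(*(S<int> *)lhs[2] && *(S<int> *)rhs[2]);   // &&  : var1
//     *(int *)lhs[3] = *(int *)lhs[3] < *(int *)rhs[3]
//                          ? *(int *)lhs[3] : *(int *)rhs[3];               // min : t_var1
//   }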
// CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
// t_var_lhs = (i{{[0-9]+}}*)lhs[0];
// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
// CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to i{{[0-9]+}}*
// t_var_rhs = (i{{[0-9]+}}*)rhs[0];
// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
// CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to i{{[0-9]+}}*

// var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1
// CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
// CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_INT_TY]]*
// var_rhs = (S<i{{[0-9]+}}>*)rhs[1];
// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
// CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
// CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_INT_TY]]*

// var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
// CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
// CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_INT_TY]]*
// var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];
// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
// CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
// CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_INT_TY]]*

// t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3
// CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
// CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to i{{[0-9]+}}*
// t_var1_rhs = (i{{[0-9]+}}*)rhs[3];
// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
// CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
// CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to i{{[0-9]+}}*

// t_var_lhs += t_var_rhs;
// CHECK: [[T_VAR_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_LHS]],
// CHECK: [[T_VAR_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_RHS]],
// CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_LHS_VAL]], [[T_VAR_RHS_VAL]]
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_LHS]],

// var_lhs = var_lhs.operator &(var_rhs);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_LHS]], [[S_INT_TY]]* dereferenceable(4) [[VAR_RHS]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1_lhs = var1_lhs.operator &&(var1_rhs);
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_LHS]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_RHS]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK:  call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
// CHECK: [[T_VAR1_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_LHS]],
// CHECK: [[T_VAR1_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_RHS]],
// CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_LHS_VAL]], [[T_VAR1_RHS_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi i32
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_LHS]],
// CHECK: ret void

#endif