1 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
2 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
3 // RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
4 // RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
5 // RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
6 // expected-no-diagnostics
7 #ifndef HEADER
8 #define HEADER
9 
10 volatile double g, g_orig;
11 volatile double &g1 = g_orig;
12 
13 template <class T>
14 struct S {
15   T f;
16   S(T a) : f(a + g) {}
17   S() : f(g) {}
18   operator T() { return T(); }
19   S &operator&(const S &) { return *this; }
20   ~S() {}
21 };
22 
23 // CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
24 // CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
25 // CHECK-DAG: [[ATOMIC_REDUCE_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
26 // CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
27 // CHECK-DAG: [[REDUCTION_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
28 // CHECK-DAG: [[REDUCTION_LOCK:@.+]] = common global [8 x i32] zeroinitializer
29 
30 template <typename T>
31 T tmain() {
32   T t;
33   S<T> test;
34   T t_var = T(), t_var1;
35   T vec[] = {1, 2};
36   S<T> s_arr[] = {1, 2};
37   S<T> &var = test;
38   S<T> var1;
39 #pragma omp parallel
40 #pragma omp for reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1) nowait
41   for (int i = 0; i < 2; ++i) {
42     vec[i] = t_var;
43     s_arr[i] = var;
44   }
45 #pragma omp parallel
46 #pragma omp for reduction(&& : t_var)
47   for (int i = 0; i < 2; ++i) {
48     vec[i] = t_var;
49     s_arr[i] = var;
50   }
51   return T();
52 }
53 
54 extern S<float> **foo();
55 
56 int main() {
57 #ifdef LAMBDA
58   // LAMBDA: [[G:@.+]] = global double
59   // LAMBDA-LABEL: @main
60   // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
61   [&]() {
62   // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
63   // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
64 #pragma omp parallel
65 #pragma omp for reduction(+:g, g1)
66     for (int i = 0; i < 2; ++i) {
67     // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
68     // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double,
69 
70     // Reduction list for runtime.
71     // LAMBDA: [[RED_LIST:%.+]] = alloca [2 x i8*],
72 
73     // LAMBDA: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
74     // LAMBDA: call void @__kmpc_for_static_init_4(
75     g = 1;
76     g1 = 1;
77     // LAMBDA: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
78     // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
79     // LAMBDA: store double* [[G_PRIVATE_ADDR]], double** [[G_PRIVATE_ADDR_REF]]
80     // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
81     // LAMBDA: call void @__kmpc_for_static_fini(
82 
83     // LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[RED_LIST]], i64 0, i64 0
84     // LAMBDA: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
85     // LAMBDA: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
86     // LAMBDA: call i32 @__kmpc_reduce(
87     // LAMBDA: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
88     // LAMBDA: i32 1, label %[[CASE1:.+]]
89     // LAMBDA: i32 2, label %[[CASE2:.+]]
90     // LAMBDA: [[CASE1]]
91     // LAMBDA: [[G_VAL:%.+]] = load double, double* [[G]]
92     // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
93     // LAMBDA: [[ADD:%.+]] = fadd double [[G_VAL]], [[G_PRIV_VAL]]
94     // LAMBDA: store double [[ADD]], double* [[G]]
95     // LAMBDA: call void @__kmpc_end_reduce(
96     // LAMBDA: br label %[[REDUCTION_DONE]]
97     // LAMBDA: [[CASE2]]
98     // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
99     // LAMBDA: fadd double
100     // LAMBDA: cmpxchg i64*
101     // LAMBDA: call void @__kmpc_end_reduce(
102     // LAMBDA: br label %[[REDUCTION_DONE]]
103     // LAMBDA: [[REDUCTION_DONE]]
104     // LAMBDA: ret void
105     [&]() {
106       // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
107       // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
108       g = 2;
109       g1 = 2;
110       // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
111       // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
112       // LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]]
113       // LAMBDA: store double 2.0{{.+}}, double* [[G_REF]]
114     }();
115   }
116   }();
117   return 0;
118 #elif defined(BLOCKS)
119   // BLOCKS: [[G:@.+]] = global double
120   // BLOCKS-LABEL: @main
121   // BLOCKS: call void {{%.+}}(i8
122   ^{
123   // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
124   // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
125 #pragma omp parallel
126 #pragma omp for reduction(-:g, g1)
127     for (int i = 0; i < 2; ++i)  {
128     // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
129     // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca double,
130 
131     // Reduction list for runtime.
132     // BLOCKS: [[RED_LIST:%.+]] = alloca [2 x i8*],
133 
134     // BLOCKS: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
135     g = 1;
136     g1 = 1;
137     // BLOCKS: call void @__kmpc_for_static_init_4(
138     // BLOCKS: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
139     // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
140     // BLOCKS: double* [[G_PRIVATE_ADDR]]
141     // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
142     // BLOCKS: call void {{%.+}}(i8
143     // BLOCKS: call void @__kmpc_for_static_fini(
144 
145     // BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[RED_LIST]], i64 0, i64 0
146     // BLOCKS: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
147     // BLOCKS: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
148     // BLOCKS: call i32 @__kmpc_reduce(
149     // BLOCKS: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
150     // BLOCKS: i32 1, label %[[CASE1:.+]]
151     // BLOCKS: i32 2, label %[[CASE2:.+]]
152     // BLOCKS: [[CASE1]]
153     // BLOCKS: [[G_VAL:%.+]] = load double, double* [[G]]
154     // BLOCKS: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
155     // BLOCKS: [[ADD:%.+]] = fadd double [[G_VAL]], [[G_PRIV_VAL]]
156     // BLOCKS: store double [[ADD]], double* [[G]]
157     // BLOCKS: call void @__kmpc_end_reduce(
158     // BLOCKS: br label %[[REDUCTION_DONE]]
159     // BLOCKS: [[CASE2]]
160     // BLOCKS: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
161     // BLOCKS: fadd double
162     // BLOCKS: cmpxchg i64*
163     // BLOCKS: call void @__kmpc_end_reduce(
164     // BLOCKS: br label %[[REDUCTION_DONE]]
165     // BLOCKS: [[REDUCTION_DONE]]
166     // BLOCKS: ret void
167     ^{
168       // BLOCKS: define {{.+}} void {{@.+}}(i8*
169       g = 2;
170       g1 = 2;
171       // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
172       // BLOCKS: store double 2.0{{.+}}, double*
173       // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
174       // BLOCKS: ret
175     }();
176   }
177   }();
178   return 0;
179 #else
180   S<float> test;
181   float t_var = 0, t_var1;
182   int vec[] = {1, 2};
183   S<float> s_arr[] = {1, 2};
184   S<float> &var = test;
185   S<float> var1, arrs[10][4];
186   S<float> **var2 = foo();
187   S<float> vvar2[2];
188   S<float> (&var3)[2] = s_arr;
189 #pragma omp parallel
190 #pragma omp for reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
191   for (int i = 0; i < 2; ++i) {
192     vec[i] = t_var;
193     s_arr[i] = var;
194   }
195   int arr[10][vec[1]];
196 #pragma omp parallel for reduction(+:arr[1][:vec[1]]) reduction(&:arrs[1:vec[1]][1:2])
197   for (int i = 0; i < 10; ++i)
198     ++arr[1][i];
199 #pragma omp parallel
200 #pragma omp for reduction(+:arr) reduction(&:arrs)
201   for (int i = 0; i < 10; ++i)
202     ++arr[1][i];
203 #pragma omp parallel
204 #pragma omp for reduction(& : var2[0 : 5][1 : 6])
205   for (int i = 0; i < 10; ++i)
206     ;
207 #pragma omp parallel
208 #pragma omp for reduction(& : vvar2[0 : 5])
209   for (int i = 0; i < 10; ++i)
210     ;
211 #pragma omp parallel
212 #pragma omp for reduction(& : var3[1 : 2])
213   for (int i = 0; i < 10; ++i)
214     ;
215 #pragma omp parallel
216 #pragma omp for reduction(& : var3)
217   for (int i = 0; i < 10; ++i)
218     ;
219   return tmain<int>();
220 #endif
221 }
222 
223 // CHECK: define {{.*}}i{{[0-9]+}} @main()
224 // CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
225 // CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
226 // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, float*, [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]*, float*, [2 x i32]*, [2 x [[S_FLOAT_TY]]]*)* [[MAIN_MICROTASK:@.+]] to void
227 // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i64, i64, i32*, [2 x i32]*, [10 x [4 x [[S_FLOAT_TY]]]]*)* [[MAIN_MICROTASK1:@.+]] to void
228 // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i64, i64, i32*, [10 x [4 x [[S_FLOAT_TY]]]]*)* [[MAIN_MICROTASK2:@.+]] to void
229 // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[S_FLOAT_TY]]***)* [[MAIN_MICROTASK3:@.+]] to void
230 // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [2 x [[S_FLOAT_TY]]]*)* [[MAIN_MICROTASK4:@.+]] to void
231 // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [2 x [[S_FLOAT_TY]]]*)* [[MAIN_MICROTASK5:@.+]] to void
232 // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [2 x [[S_FLOAT_TY]]]*)* [[MAIN_MICROTASK6:@.+]] to void
233 // CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
234 // CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
235 // CHECK: ret
236 //
237 // CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, float* dereferenceable(4) %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}}, float* dereferenceable(4) %{{.+}}, [2 x i32]* dereferenceable(8) %vec, [2 x [[S_FLOAT_TY]]]* dereferenceable(8) %{{.+}})
238 // CHECK: [[T_VAR_PRIV:%.+]] = alloca float,
239 // CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
240 // CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
241 // CHECK: [[T_VAR1_PRIV:%.+]] = alloca float,
242 
243 // Reduction list for runtime.
244 // CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*],
245 
246 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
247 
248 // CHECK: [[T_VAR_REF:%.+]] = load float*, float** %
249 // CHECK: [[VAR1_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %
250 // CHECK: [[T_VAR1_REF:%.+]] = load float*, float** %
251 
252 // For + reduction operation initial value of private variable is 0.
253 // CHECK: store float 0.0{{.+}}, float* [[T_VAR_PRIV]],
254 
255 // For & reduction operation initial value of private variable is ones in all bits.
256 // CHECK: [[VAR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %
257 // CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
258 
259 // For && reduction operation initial value of private variable is 1.0.
260 // CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[VAR1_PRIV]])
261 
262 // For min reduction operation initial value of private variable is largest repesentable value.
263 // CHECK: store float 0x47EFFFFFE0000000, float* [[T_VAR1_PRIV]],
264 
265 
266 // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
267 // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
268 // CHECK: call void @__kmpc_for_static_init_4(
269 // Skip checks for internal operations.
270 // CHECK: call void @__kmpc_for_static_fini(
271 
272 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
273 
274 // CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
275 // CHECK: [[BITCAST:%.+]] = bitcast float* [[T_VAR_PRIV]] to i8*
276 // CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
277 // CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
278 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_PRIV]] to i8*
279 // CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
280 // CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
281 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_PRIV]] to i8*
282 // CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
283 // CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
284 // CHECK: [[BITCAST:%.+]] = bitcast float* [[T_VAR1_PRIV]] to i8*
285 // CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],
286 
287 // res = __kmpc_reduce(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);
288 
289 // CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8*
290 // CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 4, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]])
291 
292 // switch(res)
293 // CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [
294 // CHECK: i32 1, label %[[CASE1:.+]]
295 // CHECK: i32 2, label %[[CASE2:.+]]
296 // CHECK: ]
297 
298 // case 1:
299 // t_var += t_var_reduction;
300 // CHECK: [[T_VAR_VAL:%.+]] = load float, float* [[T_VAR_REF]],
301 // CHECK: [[T_VAR_PRIV_VAL:%.+]] = load float, float* [[T_VAR_PRIV]],
302 // CHECK: [[UP:%.+]] = fadd float [[T_VAR_VAL]], [[T_VAR_PRIV_VAL]]
303 // CHECK: store float [[UP]], float* [[T_VAR_REF]],
304 
305 // var = var.operator &(var_reduction);
306 // CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @{{.+}}([[S_FLOAT_TY]]* [[VAR_REF]], [[S_FLOAT_TY]]* dereferenceable(4) [[VAR_PRIV]])
307 // CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_REF]] to i8*
308 // CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[UP]] to i8*
309 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
310 
311 // var1 = var1.operator &&(var1_reduction);
312 // CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_REF]])
313 // CHECK: [[VAR1_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
314 // CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
315 // CHECK: [[TRUE]]
316 // CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_PRIV]])
317 // CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
318 // CHECK: br label %[[END2]]
319 // CHECK: [[END2]]
320 // CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
321 // CHECK: [[CONV:%.+]] = uitofp i1 [[COND_LVALUE]] to float
322 // CHECK:  call void @{{.+}}([[S_FLOAT_TY]]* [[COND_LVALUE:%.+]], float [[CONV]])
323 // CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_REF]] to i8*
324 // CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[COND_LVALUE]] to i8*
325 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
326 
327 // t_var1 = min(t_var1, t_var1_reduction);
328 // CHECK: [[T_VAR1_VAL:%.+]] = load float, float* [[T_VAR1_REF]],
329 // CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load float, float* [[T_VAR1_PRIV]],
330 // CHECK: [[CMP:%.+]] = fcmp olt float [[T_VAR1_VAL]], [[T_VAR1_PRIV_VAL]]
331 // CHECK: br i1 [[CMP]]
332 // CHECK: [[UP:%.+]] = phi float
333 // CHECK: store float [[UP]], float* [[T_VAR1_REF]],
334 
335 // __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
336 // CHECK: call void @__kmpc_end_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])
337 
338 // break;
339 // CHECK: br label %[[RED_DONE]]
340 
341 // case 2:
342 // t_var += t_var_reduction;
343 // CHECK: load float, float* [[T_VAR_PRIV]]
344 // CHECK: [[T_VAR_REF_INT:%.+]] = bitcast float* [[T_VAR_REF]] to i32*
345 // CHECK: [[OLD1:%.+]] = load atomic i32, i32* [[T_VAR_REF_INT]] monotonic,
346 // CHECK: br label %[[CONT:.+]]
347 // CHECK: [[CONT]]
348 // CHECK: [[ORIG_OLD_INT:%.+]] = phi i32 [ [[OLD1]], %{{.+}} ], [ [[OLD2:%.+]], %[[CONT]] ]
349 // CHECK: fadd float
350 // CHECK: [[UP_INT:%.+]] = load i32, i32*
351 // CHECK: [[T_VAR_REF_INT:%.+]] = bitcast float* [[T_VAR_REF]] to i32*
352 // CHECK: [[RES:%.+]] = cmpxchg i32* [[T_VAR_REF_INT]], i32 [[ORIG_OLD_INT]], i32 [[UP_INT]] monotonic monotonic
353 // CHECK: [[OLD2:%.+]] = extractvalue { i32, i1 } [[RES]], 0
354 // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
355 // CHECK: br i1 [[SUCCESS_FAIL]], label %[[ATOMIC_DONE:.+]], label %[[CONT]]
356 // CHECK: [[ATOMIC_DONE]]
357 
358 // var = var.operator &(var_reduction);
359 // CHECK: call void @__kmpc_critical(
360 // CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @{{.+}}([[S_FLOAT_TY]]* [[VAR_REF]], [[S_FLOAT_TY]]* dereferenceable(4) [[VAR_PRIV]])
361 // CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_REF]] to i8*
362 // CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[UP]] to i8*
363 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
364 // CHECK: call void @__kmpc_end_critical(
365 
366 // var1 = var1.operator &&(var1_reduction);
367 // CHECK: call void @__kmpc_critical(
368 // CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_REF]])
369 // CHECK: [[VAR1_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
370 // CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
371 // CHECK: [[TRUE]]
372 // CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_PRIV]])
373 // CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
374 // CHECK: br label %[[END2]]
375 // CHECK: [[END2]]
376 // CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
377 // CHECK: [[CONV:%.+]] = uitofp i1 [[COND_LVALUE]] to float
378 // CHECK:  call void @{{.+}}([[S_FLOAT_TY]]* [[COND_LVALUE:%.+]], float [[CONV]])
379 // CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_REF]] to i8*
380 // CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[COND_LVALUE]] to i8*
381 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
382 // CHECK: call void @__kmpc_end_critical(
383 
384 // t_var1 = min(t_var1, t_var1_reduction);
385 // CHECK: load float, float* [[T_VAR1_PRIV]]
386 // CHECK: [[T_VAR1_REF_INT:%.+]] = bitcast float* [[T_VAR1_REF]] to i32*
387 // CHECK: [[OLD1:%.+]] = load atomic i32, i32* [[T_VAR1_REF_INT]] monotonic,
388 // CHECK: br label %[[CONT:.+]]
389 // CHECK: [[CONT]]
390 // CHECK: [[ORIG_OLD_INT:%.+]] = phi i32 [ [[OLD1]], %{{.+}} ], [ [[OLD2:%.+]], %{{.+}} ]
391 // CHECK: [[CMP:%.+]] = fcmp olt float
392 // CHECK: br i1 [[CMP]]
393 // CHECK: phi float
394 // CHECK: [[UP_INT:%.+]] = load i32
395 // CHECK: [[T_VAR1_REF_INT:%.+]] = bitcast float* [[T_VAR1_REF]] to i32*
396 // CHECK: [[RES:%.+]] = cmpxchg i32* [[T_VAR1_REF_INT]], i32 [[ORIG_OLD_INT]], i32 [[UP_INT]] monotonic monotonic
397 // CHECK: [[OLD2:%.+]] = extractvalue { i32, i1 } [[RES]], 0
398 // CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
399 // CHECK: br i1 [[SUCCESS_FAIL]], label %[[ATOMIC_DONE:.+]], label %[[CONT]]
400 // CHECK: [[ATOMIC_DONE]]
401 
402 // __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
403 // CHECK: call void @__kmpc_end_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])
404 
405 // break;
406 // CHECK: br label %[[RED_DONE]]
407 // CHECK: [[RED_DONE]]
408 // CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
409 // CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*
410 // CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
411 
412 // CHECK: ret void
413 
414 // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
415 //  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
416 //  ...
417 //  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
418 //  *(Type<n>-1*)rhs[<n>-1]);
419 // }
420 // CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
421 // t_var_lhs = (float*)lhs[0];
422 // CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
423 // CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
424 // CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to float*
425 // t_var_rhs = (float*)rhs[0];
426 // CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
427 // CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
428 // CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to float*
429 
430 // var_lhs = (S<float>*)lhs[1];
431 // CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1
432 // CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
433 // CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_FLOAT_TY]]*
434 // var_rhs = (S<float>*)rhs[1];
435 // CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
436 // CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
437 // CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_FLOAT_TY]]*
438 
439 // var1_lhs = (S<float>*)lhs[2];
440 // CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
441 // CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
442 // CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_FLOAT_TY]]*
443 // var1_rhs = (S<float>*)rhs[2];
444 // CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
445 // CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
446 // CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_FLOAT_TY]]*
447 
448 // t_var1_lhs = (float*)lhs[3];
449 // CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3
450 // CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
451 // CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to float*
452 // t_var1_rhs = (float*)rhs[3];
453 // CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
454 // CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
455 // CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to float*
456 
457 // t_var_lhs += t_var_rhs;
458 // CHECK: [[T_VAR_LHS_VAL:%.+]] = load float, float* [[T_VAR_LHS]],
459 // CHECK: [[T_VAR_RHS_VAL:%.+]] = load float, float* [[T_VAR_RHS]],
460 // CHECK: [[UP:%.+]] = fadd float [[T_VAR_LHS_VAL]], [[T_VAR_RHS_VAL]]
461 // CHECK: store float [[UP]], float* [[T_VAR_LHS]],
462 
463 // var_lhs = var_lhs.operator &(var_rhs);
464 // CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @{{.+}}([[S_FLOAT_TY]]* [[VAR_LHS]], [[S_FLOAT_TY]]* dereferenceable(4) [[VAR_RHS]])
465 // CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_LHS]] to i8*
466 // CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[UP]] to i8*
467 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
468 
469 // var1_lhs = var1_lhs.operator &&(var1_rhs);
470 // CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_LHS]])
471 // CHECK: [[VAR1_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
472 // CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
473 // CHECK: [[TRUE]]
474 // CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_RHS]])
475 // CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
476 // CHECK: br label %[[END2]]
477 // CHECK: [[END2]]
478 // CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
479 // CHECK: [[CONV:%.+]] = uitofp i1 [[COND_LVALUE]] to float
480 // CHECK:  call void @{{.+}}([[S_FLOAT_TY]]* [[COND_LVALUE:%.+]], float [[CONV]])
481 // CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_LHS]] to i8*
482 // CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[COND_LVALUE]] to i8*
483 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
484 
485 // t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
486 // CHECK: [[T_VAR1_LHS_VAL:%.+]] = load float, float* [[T_VAR1_LHS]],
487 // CHECK: [[T_VAR1_RHS_VAL:%.+]] = load float, float* [[T_VAR1_RHS]],
488 // CHECK: [[CMP:%.+]] = fcmp olt float [[T_VAR1_LHS_VAL]], [[T_VAR1_RHS_VAL]]
489 // CHECK: br i1 [[CMP]]
490 // CHECK: [[UP:%.+]] = phi float
491 // CHECK: store float [[UP]], float* [[T_VAR1_LHS]],
492 // CHECK: ret void
493 
494 // CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i64 %{{.+}}, i64 %{{.+}}, i32* %{{.+}}, [2 x i32]* dereferenceable(8) %{{.+}}, [10 x [4 x [[S_FLOAT_TY]]]]* dereferenceable(160) %{{.+}})
495 
496 // Reduction list for runtime.
497 // CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*],
498 
499 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
500 // CHECK: store i{{[0-9]+}}* %{{.+}}, i{{[0-9]+}}**
501 // CHECK: store i{{[0-9]+}}* %{{.+}}, i{{[0-9]+}}** [[ARR_ADDR:%.+]],
502 // CHECK: [[ARR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[ARR_ADDR]],
503 
504 // CHECK: [[UB_CAST:%.+]] = ptrtoint i32* [[UB1_UP:%.+]] to i64
505 // CHECK: [[LB_CAST:%.+]] = ptrtoint i32* [[LB1_0:%.+]] to i64
506 // CHECK: [[DIFF:%.+]] = sub i64 [[UB_CAST]], [[LB_CAST]]
507 // CHECK: [[SIZE_1:%.+]] = sdiv exact i64 [[DIFF]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
508 // CHECK: [[ARR_SIZE:%.+]] = add nuw i64 [[SIZE_1]], 1
509 // CHECK: call i8* @llvm.stacksave()
510 // CHECK: [[ARR_PRIV:%.+]] = alloca i32, i64 [[ARR_SIZE]],
511 
512 // Check initialization of private copy.
513 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[ARR_PRIV]], i64 [[ARR_SIZE]]
514 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[ARR_PRIV]], [[END]]
515 // CHECK: br i1 [[ISEMPTY]],
516 // CHECK: phi i32*
517 // CHECK: store i32 0, i32* %
518 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]]
519 // CHECK: br i1 [[DONE]],
520 
521 // CHECK: [[ARRS_PRIV:%.+]] = alloca [[S_FLOAT_TY]], i64 [[ARRS_SIZE:%.+]],
522 
523 // Check initialization of private copy.
524 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_PRIV]], i64 [[ARRS_SIZE]]
525 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_PRIV]], [[END]]
526 // CHECK: br i1 [[ISEMPTY]],
527 // CHECK: phi [[S_FLOAT_TY]]*
528 // CHECK: call void @_ZN1SIfEC1Ev([[S_FLOAT_TY]]* %
529 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]]
530 // CHECK: br i1 [[DONE]],
531 
532 // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
533 // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
534 // CHECK: call void @__kmpc_for_static_init_4(
535 // Skip checks for internal operations.
536 // CHECK: call void @__kmpc_for_static_fini(
537 
538 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
539 
540 // CHECK: [[ARR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
541 // CHECK: [[BITCAST:%.+]] = bitcast i32* [[ARR_PRIV]] to i8*
542 // CHECK: store i8* [[BITCAST]], i8** [[ARR_PRIV_REF]],
543 // CHECK: [[ARR_SIZE_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
544 // CHECK: [[BITCAST:%.+]] = inttoptr i64 [[ARR_SIZE]] to i8*
545 // CHECK: store i8* [[BITCAST]], i8** [[ARR_SIZE_REF]],
546 // CHECK: [[ARRS_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
547 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[ARRS_PRIV]] to i8*
548 // CHECK: store i8* [[BITCAST]], i8** [[ARRS_PRIV_REF]],
549 // CHECK: [[ARRS_SIZE_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
550 // CHECK: [[BITCAST:%.+]] = inttoptr i64 [[ARRS_SIZE]] to i8*
551 // CHECK: store i8* [[BITCAST]], i8** [[ARRS_SIZE_REF]],
552 
553 // res = __kmpc_reduce(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);
554 
555 // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
556 // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
557 // CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8*
558 // CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 2, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]])
559 
560 // switch(res)
561 // CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [
562 // CHECK: i32 1, label %[[CASE1:.+]]
563 // CHECK: i32 2, label %[[CASE2:.+]]
564 // CHECK: ]
565 
566 // case 1:
567 // CHECK: [[CASE1]]
568 
569 // arr[:] += arr_reduction[:];
570 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[LB1_0]], i64 [[ARR_SIZE]]
571 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[LB1_0]], [[END]]
572 // CHECK: br i1 [[ISEMPTY]],
573 // CHECK: phi i32*
574 // CHECK: [[ADD:%.+]] = add nsw i32 %
575 // CHECK: store i32 [[ADD]], i32* %
576 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]]
577 // CHECK: br i1 [[DONE]],
578 
579 // arrs[:] = var.operator &(arrs_reduction[:]);
580 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_LB:%.+]], i64 [[ARRS_SIZE]]
581 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_LB]], [[END]]
582 // CHECK: br i1 [[ISEMPTY]],
583 // CHECK: phi [[S_FLOAT_TY]]*
584 // CHECK: [[AND:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @_ZN1SIfEanERKS0_([[S_FLOAT_TY]]* %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}})
585 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[AND]] to i8*
586 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.+}}, i8* [[BITCAST]], i64 4, i32 4, i1 false)
587 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]]
588 // CHECK: br i1 [[DONE]],
589 
590 // __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
591 // CHECK: call void @__kmpc_end_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])
592 
593 // break;
594 // CHECK: br label %[[RED_DONE]]
595 
596 // case 2:
597 // CHECK: [[CASE2]]
598 
599 // arr[:] += arr_reduction[:];
600 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[LB1_0]], i64 [[ARR_SIZE]]
601 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[LB1_0]], [[END]]
602 // CHECK: br i1 [[ISEMPTY]],
603 // CHECK: phi i32*
604 // CHECK: atomicrmw add i32* %{{.+}}, i32 %{{.+}} monotonic
605 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]]
606 // CHECK: br i1 [[DONE]],
607 
608 // arrs[:] = var.operator &(arrs_reduction[:]);
609 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_LB:%.+]], i64 [[ARRS_SIZE]]
610 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_LB]], [[END]]
611 // CHECK: br i1 [[ISEMPTY]],
612 // CHECK: phi [[S_FLOAT_TY]]*
613 // CHECK: call void @__kmpc_critical(
614 // CHECK: [[AND:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @_ZN1SIfEanERKS0_([[S_FLOAT_TY]]* %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}})
615 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[AND]] to i8*
616 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.+}}, i8* [[BITCAST]], i64 4, i32 4, i1 false)
617 // CHECK: call void @__kmpc_end_critical(
618 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]]
619 // CHECK: br i1 [[DONE]],
620 
621 // break;
622 // CHECK: br label %[[RED_DONE]]
623 // CHECK: [[RED_DONE]]
624 
625 // Check destruction of private copy.
626 // CHECK: [[END:%.+]] = getelementptr inbounds [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_PRIV]], i64 [[ARRS_SIZE]]
627 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_PRIV]], [[END]]
628 // CHECK: br i1 [[ISEMPTY]],
629 // CHECK: phi [[S_FLOAT_TY]]*
630 // CHECK: call void @_ZN1SIfED1Ev([[S_FLOAT_TY]]* %
631 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[ARRS_PRIV]]
632 // CHECK: br i1 [[DONE]],
633 // CHECK: call void @llvm.stackrestore(i8*
634 
635 // CHECK: ret void
636 
637 // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
638 //  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
639 //  ...
640 //  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
641 //  *(Type<n>-1*)rhs[<n>-1]);
642 // }
643 // CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
644 // arr_rhs = (int*)rhs[0];
645 // CHECK: [[ARR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
646 // CHECK: [[ARR_RHS_VOID:%.+]] = load i8*, i8** [[ARR_RHS_REF]],
647 // CHECK: [[ARR_RHS:%.+]] = bitcast i8* [[ARR_RHS_VOID]] to i32*
648 // arr_lhs = (int*)lhs[0];
649 // CHECK: [[ARR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
650 // CHECK: [[ARR_LHS_VOID:%.+]] = load i8*, i8** [[ARR_LHS_REF]],
651 // CHECK: [[ARR_LHS:%.+]] = bitcast i8* [[ARR_LHS_VOID]] to i32*
652 
653 // arr_size = (size_t)lhs[1];
654 // CHECK: [[ARR_SIZE_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
655 // CHECK: [[ARR_SIZE_VOID:%.+]] = load i8*, i8** [[ARR_SIZE_REF]],
656 // CHECK: [[ARR_SIZE:%.+]] = ptrtoint i8* [[ARR_SIZE_VOID]] to i64
657 
658 // arrs_rhs = (S<float>*)rhs[2];
659 // CHECK: [[ARRS_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
660 // CHECK: [[ARRS_RHS_VOID:%.+]] = load i8*, i8** [[ARRS_RHS_REF]],
661 // CHECK: [[ARRS_RHS:%.+]] = bitcast i8* [[ARRS_RHS_VOID]] to [[S_FLOAT_TY]]*
662 // arrs_lhs = (S<float>*)lhs[2];
663 // CHECK: [[ARRS_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
664 // CHECK: [[ARRS_LHS_VOID:%.+]] = load i8*, i8** [[ARRS_LHS_REF]],
665 // CHECK: [[ARRS_LHS:%.+]] = bitcast i8* [[ARRS_LHS_VOID]] to [[S_FLOAT_TY]]*
666 
667 // arrs_size = (size_t)lhs[3];
668 // CHECK: [[ARRS_SIZE_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
669 // CHECK: [[ARRS_SIZE_VOID:%.+]] = load i8*, i8** [[ARRS_SIZE_REF]],
670 // CHECK: [[ARRS_SIZE:%.+]] = ptrtoint i8* [[ARRS_SIZE_VOID]] to i64
671 
672 // arr_lhs[:] += arr_rhs[:];
673 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[ARR_LHS]], i64 [[ARR_SIZE]]
674 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[ARR_LHS]], [[END]]
675 // CHECK: br i1 [[ISEMPTY]],
676 // CHECK: phi i32*
677 // CHECK: [[ADD:%.+]] = add nsw i32 %
678 // CHECK: store i32 [[ADD]], i32* %
679 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]]
680 // CHECK: br i1 [[DONE]],
681 
682 // arrs_lhs = arrs_lhs.operator &(arrs_rhs);
683 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_LB:%.+]], i64 [[ARRS_SIZE]]
684 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_LB]], [[END]]
685 // CHECK: br i1 [[ISEMPTY]],
686 // CHECK: phi [[S_FLOAT_TY]]*
687 // CHECK: [[AND:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @_ZN1SIfEanERKS0_([[S_FLOAT_TY]]* %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}})
688 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[AND]] to i8*
689 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.+}}, i8* [[BITCAST]], i64 4, i32 4, i1 false)
690 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]]
691 // CHECK: br i1 [[DONE]],
692 
693 // CHECK: ret void
694 
695 // CHECK: define internal void [[MAIN_MICROTASK2]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i64 %{{.+}}, i64 %{{.+}}, i32* %{{.+}}, [10 x [4 x [[S_FLOAT_TY]]]]* dereferenceable(160) %{{.+}})
696 
697 // CHECK: [[ARRS_PRIV:%.+]] = alloca [10 x [4 x [[S_FLOAT_TY]]]],
698 
699 // Reduction list for runtime.
700 // CHECK: [[RED_LIST:%.+]] = alloca [3 x i8*],
701 
702 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
703 
704 // CHECK: [[ARR_SIZE:%.+]] = udiv exact i64
705 // CHECK: call i8* @llvm.stacksave()
706 // CHECK: [[ARR_PRIV:%.+]] = alloca i32, i64 [[ARR_SIZE]],
707 
708 // Check initialization of private copy.
709 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[ARR_PRIV]], i64 [[ARR_SIZE]]
710 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[ARR_PRIV]], [[END]]
711 // CHECK: br i1 [[ISEMPTY]],
712 // CHECK: phi i32*
713 // CHECK: store i32 0, i32* %
714 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]]
715 // CHECK: br i1 [[DONE]],
716 
717 // Check initialization of private copy.
718 // CHECK: [[BEGIN:%.+]] = getelementptr inbounds [10 x [4 x [[S_FLOAT_TY]]]], [10 x [4 x [[S_FLOAT_TY]]]]* [[ARRS_PRIV]], i32 0, i32 0, i32 0
719 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[BEGIN]], i64 40
720 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[BEGIN]], [[END]]
721 // CHECK: br i1 [[ISEMPTY]],
722 // CHECK: phi [[S_FLOAT_TY]]*
723 // CHECK: call void @_ZN1SIfEC1Ev([[S_FLOAT_TY]]* %
724 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]]
725 // CHECK: br i1 [[DONE]],
726 // CHECK: [[LHS_BEGIN:%.+]] = bitcast [10 x [4 x [[S_FLOAT_TY]]]]* %{{.+}} to [[S_FLOAT_TY]]*
727 // CHECK: [[ARRS_PRIV_BEGIN:%.+]] = bitcast [10 x [4 x [[S_FLOAT_TY]]]]* [[ARRS_PRIV]] to [[S_FLOAT_TY]]*
728 
729 // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
730 // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
731 // CHECK: call void @__kmpc_for_static_init_4(
732 // Skip checks for internal operations.
733 // CHECK: call void @__kmpc_for_static_fini(
734 
735 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
736 
737 // CHECK: [[ARR_PRIV_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST]], i64 0, i64 0
738 // CHECK: [[BITCAST:%.+]] = bitcast i32* [[ARR_PRIV]] to i8*
739 // CHECK: store i8* [[BITCAST]], i8** [[ARR_PRIV_REF]],
740 // CHECK: [[ARR_SIZE_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST]], i64 0, i64 1
741 // CHECK: [[BITCAST:%.+]] = inttoptr i64 [[ARR_SIZE]] to i8*
742 // CHECK: store i8* [[BITCAST]], i8** [[ARR_SIZE_REF]],
743 // CHECK: [[ARRS_PRIV_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST]], i64 0, i64 2
744 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[ARRS_PRIV_BEGIN]] to i8*
745 // CHECK: store i8* [[BITCAST]], i8** [[ARRS_PRIV_REF]],
746 
747 // res = __kmpc_reduce(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);
748 
749 // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
750 // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
751 // CHECK: [[BITCAST:%.+]] = bitcast [3 x i8*]* [[RED_LIST]] to i8*
752 // CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 2, i64 24, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]])
753 
754 // switch(res)
755 // CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [
756 // CHECK: i32 1, label %[[CASE1:.+]]
757 // CHECK: i32 2, label %[[CASE2:.+]]
758 // CHECK: ]
759 
760 // case 1:
761 // CHECK: [[CASE1]]
762 
763 // arr[:] += arr_reduction[:];
764 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[LB1_0:%.+]], i64 [[ARR_SIZE]]
765 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[LB1_0]], [[END]]
766 // CHECK: br i1 [[ISEMPTY]],
767 // CHECK: phi i32*
768 // CHECK: [[ADD:%.+]] = add nsw i32 %
769 // CHECK: store i32 [[ADD]], i32* %
770 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]]
771 // CHECK: br i1 [[DONE]],
772 
773 // arrs[:] = var.operator &(arrs_reduction[:]);
774 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[LHS_BEGIN]], i64 40
775 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[LHS_BEGIN]], [[END]]
776 // CHECK: br i1 [[ISEMPTY]],
777 // CHECK: phi [[S_FLOAT_TY]]*
778 // CHECK: [[AND:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @_ZN1SIfEanERKS0_([[S_FLOAT_TY]]* %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}})
779 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[AND]] to i8*
780 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.+}}, i8* [[BITCAST]], i64 4, i32 4, i1 false)
781 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]]
782 // CHECK: br i1 [[DONE]],
783 
784 // __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
785 // CHECK: call void @__kmpc_end_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])
786 
787 // break;
788 // CHECK: br label %[[RED_DONE]]
789 
790 // case 2:
791 // CHECK: [[CASE2]]
792 
793 // arr[:] += arr_reduction[:];
794 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[LB1_0]], i64 [[ARR_SIZE]]
795 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[LB1_0]], [[END]]
796 // CHECK: br i1 [[ISEMPTY]],
797 // CHECK: phi i32*
798 // CHECK: atomicrmw add i32* %{{.+}}, i32 %{{.+}} monotonic
799 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]]
800 // CHECK: br i1 [[DONE]],
801 
802 // arrs[:] = var.operator &(arrs_reduction[:]);
803 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[LHS_BEGIN]], i64 40
804 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[LHS_BEGIN]], [[END]]
805 // CHECK: br i1 [[ISEMPTY]],
806 // CHECK: phi [[S_FLOAT_TY]]*
807 // CHECK: call void @__kmpc_critical(
808 // CHECK: [[AND:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @_ZN1SIfEanERKS0_([[S_FLOAT_TY]]* %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}})
809 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[AND]] to i8*
810 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.+}}, i8* [[BITCAST]], i64 4, i32 4, i1 false)
811 // CHECK: call void @__kmpc_end_critical(
812 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]]
813 // CHECK: br i1 [[DONE]],
814 
815 // break;
816 // CHECK: br label %[[RED_DONE]]
817 // CHECK: [[RED_DONE]]
818 
819 // Check destruction of private copy.
820 // CHECK: [[BEGIN:%.+]] = getelementptr inbounds [10 x [4 x [[S_FLOAT_TY]]]], [10 x [4 x [[S_FLOAT_TY]]]]* [[ARRS_PRIV]], i32 0, i32 0, i32 0
821 // CHECK: [[END:%.+]] = getelementptr inbounds [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[BEGIN]], i64 40
822 // CHECK: br
823 // CHECK: phi [[S_FLOAT_TY]]*
824 // CHECK: call void @_ZN1SIfED1Ev([[S_FLOAT_TY]]* %
825 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[BEGIN]]
826 // CHECK: br i1 [[DONE]],
827 // CHECK: call void @llvm.stackrestore(i8*
828 // CHECK: call void @__kmpc_barrier(
829 
830 // CHECK: ret void
831 
832 // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
833 //  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
834 //  ...
835 //  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
836 //  *(Type<n>-1*)rhs[<n>-1]);
837 // }
838 // CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
839 // arr_rhs = (int*)rhs[0];
840 // CHECK: [[ARR_RHS_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
841 // CHECK: [[ARR_RHS_VOID:%.+]] = load i8*, i8** [[ARR_RHS_REF]],
842 // CHECK: [[ARR_RHS:%.+]] = bitcast i8* [[ARR_RHS_VOID]] to i32*
843 // arr_lhs = (int*)lhs[0];
844 // CHECK: [[ARR_LHS_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
845 // CHECK: [[ARR_LHS_VOID:%.+]] = load i8*, i8** [[ARR_LHS_REF]],
846 // CHECK: [[ARR_LHS:%.+]] = bitcast i8* [[ARR_LHS_VOID]] to i32*
847 
848 // arr_size = (size_t)lhs[1];
849 // CHECK: [[ARR_SIZE_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
850 // CHECK: [[ARR_SIZE_VOID:%.+]] = load i8*, i8** [[ARR_SIZE_REF]],
851 // CHECK: [[ARR_SIZE:%.+]] = ptrtoint i8* [[ARR_SIZE_VOID]] to i64
852 
853 // arrs_rhs = (S<float>*)rhs[2];
854 // CHECK: [[ARRS_RHS_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
855 // CHECK: [[ARRS_RHS_VOID:%.+]] = load i8*, i8** [[ARRS_RHS_REF]],
856 // CHECK: [[ARRS_RHS:%.+]] = bitcast i8* [[ARRS_RHS_VOID]] to [[S_FLOAT_TY]]*
857 // arrs_lhs = (S<float>*)lhs[2];
858 // CHECK: [[ARRS_LHS_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
859 // CHECK: [[ARRS_LHS_VOID:%.+]] = load i8*, i8** [[ARRS_LHS_REF]],
860 // CHECK: [[ARRS_LHS:%.+]] = bitcast i8* [[ARRS_LHS_VOID]] to [[S_FLOAT_TY]]*
861 
862 // arr_lhs[:] += arr_rhs[:];
863 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[ARR_LHS]], i64 [[ARR_SIZE]]
864 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[ARR_LHS]], [[END]]
865 // CHECK: br i1 [[ISEMPTY]],
866 // CHECK: phi i32*
867 // CHECK: [[ADD:%.+]] = add nsw i32 %
868 // CHECK: store i32 [[ADD]], i32* %
869 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]]
870 // CHECK: br i1 [[DONE]],
871 
872 // arrs_lhs = arrs_lhs.operator &(arrs_rhs);
873 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_LB:%.+]], i64 40
874 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_LB]], [[END]]
875 // CHECK: br i1 [[ISEMPTY]],
876 // CHECK: phi [[S_FLOAT_TY]]*
877 // CHECK: [[AND:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @_ZN1SIfEanERKS0_([[S_FLOAT_TY]]* %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}})
878 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[AND]] to i8*
879 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.+}}, i8* [[BITCAST]], i64 4, i32 4, i1 false)
880 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]]
881 // CHECK: br i1 [[DONE]],
882 
883 // CHECK: ret void
884 
885 // CHECK: define internal void [[MAIN_MICROTASK3]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[S_FLOAT_TY]]*** dereferenceable(8) %{{.+}})
886 
887 // CHECK: [[VAR2_ORIG_ADDR:%.+]] = alloca [[S_FLOAT_TY]]***,
888 
889 // Reduction list for runtime.
890 // CHECK: [[RED_LIST:%.+]] = alloca [2 x i8*],
891 
892 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
893 // CHECK: [[VAR2_ORIG:%.+]] = load [[S_FLOAT_TY]]***, [[S_FLOAT_TY]]**** [[VAR2_ORIG_ADDR]],
894 
895 // CHECK: [[LAST:%.+]] = ptrtoint [[S_FLOAT_TY]]* %{{.+}} to i64
896 // CHECK: [[FIRST:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW:%.+]] to i64
897 // CHECK: [[BYTE_DIF:%.+]] = sub i64 [[LAST]], [[FIRST]]
898 // CHECK: [[DIF:%.+]] = sdiv exact i64 [[BYTE_DIF]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
899 // CHECK: [[SIZE:%.+]] = add nuw i64 [[DIF]], 1
900 // CHECK: call i8* @llvm.stacksave()
901 // CHECK: [[VAR2_PRIV:%.+]] = alloca [[S_FLOAT_TY]], i64 [[SIZE]],
902 // CHECK: [[LD:%.+]] = load [[S_FLOAT_TY]]**, [[S_FLOAT_TY]]*** [[VAR2_ORIG]],
903 // CHECK: [[ORIG_START:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[LD]],
904 // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64
905 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
906 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
907 // CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
908 // CHECK: [[PSEUDO_VAR2_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VAR2_PRIV]], i64 [[OFFSET]]
909 // CHECK: store [[S_FLOAT_TY]]** [[REF:.+]], [[S_FLOAT_TY]]*** %
910 // CHECK: store [[S_FLOAT_TY]]* [[PSEUDO_VAR2_PRIV]], [[S_FLOAT_TY]]** [[REF]]
911 // CHECK: ret void
912 
913 // CHECK: define internal void [[MAIN_MICROTASK4]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [2 x [[S_FLOAT_TY]]]* dereferenceable(8) %{{.+}})
914 
915 // CHECK: [[VVAR2_ORIG_ADDR:%.+]] = alloca [2 x [[S_FLOAT_TY]]]*,
916 
917 // Reduction list for runtime.
918 // CHECK: [[RED_LIST:%.+]] = alloca [2 x i8*],
919 
920 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
921 // CHECK: [[VVAR2_ORIG:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** [[VVAR2_ORIG_ADDR]],
922 
923 // CHECK: [[LOW:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[VVAR2_ORIG]], i64 0, i64 0
924 // CHECK: [[LAST:%.+]] = ptrtoint [[S_FLOAT_TY]]* %{{.+}} to i64
925 // CHECK: [[FIRST:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
926 // CHECK: [[BYTE_DIF:%.+]] = sub i64 [[LAST]], [[FIRST]]
927 // CHECK: [[DIF:%.+]] = sdiv exact i64 [[BYTE_DIF]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
928 // CHECK: [[SIZE:%.+]] = add nuw i64 [[DIF]], 1
929 // CHECK: call i8* @llvm.stacksave()
930 // CHECK: [[VVAR2_PRIV:%.+]] = alloca [[S_FLOAT_TY]], i64 [[SIZE]],
931 // CHECK: [[ORIG_START:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[VVAR2_ORIG]] to [[S_FLOAT_TY]]*
932 // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64
933 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
934 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
935 // CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
936 // CHECK: [[PSEUDO_VVAR2_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VVAR2_PRIV]], i64 [[OFFSET]]
937 // CHECK: [[VVAR2_PRIV:%.+]] = bitcast [[S_FLOAT_TY]]* [[PSEUDO_VVAR2_PRIV]] to [2 x [[S_FLOAT_TY]]]*
938 // CHECK: ret void
939 
940 // CHECK: define internal void [[MAIN_MICROTASK5]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [2 x [[S_FLOAT_TY]]]* dereferenceable(8) %{{.+}})
941 
942 // CHECK: [[VAR3_ORIG_ADDR:%.+]] = alloca [2 x [[S_FLOAT_TY]]]*,
943 
944 // Reduction list for runtime.
945 // CHECK: [[RED_LIST:%.+]] = alloca [2 x i8*],
946 
947 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
948 
949 // CHECK: [[VAR3_ORIG:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR]],
950 // CHECK: store [2 x [[S_FLOAT_TY]]]* [[VAR3_ORIG]], [2 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR:%.+]],
951 // CHECK: [[LAST:%.+]] = ptrtoint [[S_FLOAT_TY]]* %{{.+}} to i64
952 // CHECK: [[FIRST:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW:%.+]] to i64
953 // CHECK: [[BYTE_DIF:%.+]] = sub i64 [[LAST]], [[FIRST]]
954 // CHECK: [[DIF:%.+]] = sdiv exact i64 [[BYTE_DIF]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
955 // CHECK: [[SIZE:%.+]] = add nuw i64 [[DIF]], 1
956 // CHECK: call i8* @llvm.stacksave()
957 // CHECK: [[VAR3_PRIV:%.+]] = alloca [[S_FLOAT_TY]], i64 [[SIZE]],
958 // CHECK: [[VAR3_ORIG:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR]],
959 // CHECK: [[ORIG_START:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[VAR3_ORIG]] to [[S_FLOAT_TY]]*
960 // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64
961 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
962 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
963 // CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
964 // CHECK: [[PSEUDO_VAR3_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VAR3_PRIV]], i64 [[OFFSET]]
965 // CHECK: [[VAR3_PRIV:%.+]] = bitcast [[S_FLOAT_TY]]* [[PSEUDO_VAR3_PRIV]] to [2 x [[S_FLOAT_TY]]]*
966 
967 // CHECK: store [2 x [[S_FLOAT_TY]]]* [[VAR3_PRIV]], [2 x [[S_FLOAT_TY]]]** %
968 
969 // CHECK: ret void
970 
971 // CHECK: define internal void [[MAIN_MICROTASK6]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [2 x [[S_FLOAT_TY]]]* dereferenceable(8) %{{.+}})
972 
973 // CHECK: [[VAR3_ORIG_ADDR:%.+]] = alloca [2 x [[S_FLOAT_TY]]]*,
974 // CHECK: [[VAR3_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]],
975 
976 // Reduction list for runtime.
977 // CHECK: [[RED_LIST:%.+]] = alloca [1 x i8*],
978 
979 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
980 
981 // CHECK: [[VAR3_ORIG:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR]],
982 // CHECK: store [2 x [[S_FLOAT_TY]]]* [[VAR3_ORIG]], [2 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR:%.+]],
983 // CHECK: [[VAR3_ORIG:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR]],
984 // CHECK: getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[VAR3_PRIV]], i32 0, i32 0
985 // CHECK: getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* %{{.+}}, i64 2
986 
987 // CHECK: store [2 x [[S_FLOAT_TY]]]* [[VAR3_PRIV]], [2 x [[S_FLOAT_TY]]]** %
988 // CHECK: bitcast [2 x [[S_FLOAT_TY]]]* [[VAR3_ORIG]] to [[S_FLOAT_TY]]*
989 
990 // CHECK: ret void
991 
992 // CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
993 // CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
994 // CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
995 // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [[S_INT_TY]]*, [[S_INT_TY]]*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*)* [[TMAIN_MICROTASK:@.+]] to void
996 // CHECK: call {{.*}} [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
997 // CHECK: ret
998 //
999 // CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}}, [[S_INT_TY]]* dereferenceable(4) %{{.+}}, [[S_INT_TY]]* dereferenceable(4) %{{.+}}, i32* dereferenceable(4) %{{.+}}, [2 x i32]* dereferenceable(8) %{{.+}}, [2 x [[S_INT_TY]]]* dereferenceable(8) %{{.+}})
1000 // CHECK: alloca i{{[0-9]+}},
1001 // CHECK: alloca i{{[0-9]+}},
1002 // CHECK: alloca i{{[0-9]+}},
1003 // CHECK: alloca i{{[0-9]+}},
1004 // CHECK: alloca i{{[0-9]+}},
1005 // CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
1006 // CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]],
1007 // CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_INT_TY]],
1008 // CHECK: [[T_VAR1_PRIV:%.+]] = alloca i{{[0-9]+}},
1009 
1010 // Reduction list for runtime.
1011 // CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*],
1012 
1013 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
1014 
1015 // CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
1016 // CHECK: [[VAR1_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
1017 // CHECK: [[T_VAR1_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
1018 
1019 // For + reduction operation initial value of private variable is 0.
1020 // CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[T_VAR_PRIV]],
1021 
1022 // For & reduction operation initial value of private variable is ones in all bits.
1023 // CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
1024 // CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR_PRIV]])
1025 
1026 // For && reduction operation initial value of private variable is 1.0.
1027 // CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR1_PRIV]])
1028 
1029 // For min reduction operation initial value of private variable is largest repesentable value.
1030 // CHECK: store i{{[0-9]+}} 2147483647, i{{[0-9]+}}* [[T_VAR1_PRIV]],
1031 
1032 // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
1033 // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
1034 // CHECK: call void @__kmpc_for_static_init_4(
1035 // Skip checks for internal operations.
1036 // CHECK: call void @__kmpc_for_static_fini(
1037 
1038 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
1039 
1040 // CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
1041 // CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR_PRIV]] to i8*
1042 // CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
1043 // CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
1044 // CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR_PRIV]] to i8*
1045 // CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
1046 // CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
1047 // CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_PRIV]] to i8*
1048 // CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
1049 // CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
1050 // CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR1_PRIV]] to i8*
1051 // CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],
1052 
1053 // res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);
1054 
1055 // CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8*
1056 // CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 4, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]])
1057 
1058 // switch(res)
1059 // CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [
1060 // CHECK: i32 1, label %[[CASE1:.+]]
1061 // CHECK: i32 2, label %[[CASE2:.+]]
1062 // CHECK: ]
1063 
1064 // case 1:
1065 // t_var += t_var_reduction;
1066 // CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_REF]],
1067 // CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
1068 // CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_VAL]], [[T_VAR_PRIV_VAL]]
1069 // CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_REF]],
1070 
1071 // var = var.operator &(var_reduction);
1072 // CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
1073 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
1074 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
1075 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
1076 
1077 // var1 = var1.operator &&(var1_reduction);
1078 // CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
1079 // CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
1080 // CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
1081 // CHECK: [[TRUE]]
1082 // CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]])
1083 // CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
1084 // CHECK: br label %[[END2]]
1085 // CHECK: [[END2]]
1086 // CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
1087 // CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
1088 // CHECK:  call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
1089 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
1090 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
1091 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
1092 
1093 // t_var1 = min(t_var1, t_var1_reduction);
1094 // CHECK: [[T_VAR1_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_REF]],
1095 // CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]],
1096 // CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_VAL]], [[T_VAR1_PRIV_VAL]]
1097 // CHECK: br i1 [[CMP]]
1098 // CHECK: [[UP:%.+]] = phi i32
1099 // CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_REF]],
1100 
1101 // __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);
1102 // CHECK: call void @__kmpc_end_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])
1103 
1104 // break;
1105 // CHECK: br label %[[RED_DONE]]
1106 
1107 // case 2:
1108 // t_var += t_var_reduction;
1109 // CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]]
1110 // CHECK: atomicrmw add i32* [[T_VAR_REF]], i32 [[T_VAR_PRIV_VAL]] monotonic
1111 
1112 // var = var.operator &(var_reduction);
1113 // CHECK: call void @__kmpc_critical(
1114 // CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
1115 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
1116 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
1117 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
1118 // CHECK: call void @__kmpc_end_critical(
1119 
1120 // var1 = var1.operator &&(var1_reduction);
1121 // CHECK: call void @__kmpc_critical(
1122 // CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
1123 // CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
1124 // CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
1125 // CHECK: [[TRUE]]
1126 // CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]])
1127 // CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
1128 // CHECK: br label %[[END2]]
1129 // CHECK: [[END2]]
1130 // CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
1131 // CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
1132 // CHECK:  call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
1133 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
1134 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
1135 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
1136 // CHECK: call void @__kmpc_end_critical(
1137 
1138 // t_var1 = min(t_var1, t_var1_reduction);
1139 // CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]]
1140 // CHECK: atomicrmw min i32* [[T_VAR1_REF]], i32 [[T_VAR1_PRIV_VAL]] monotonic
1141 
1142 // break;
1143 // CHECK: br label %[[RED_DONE]]
1144 // CHECK: [[RED_DONE]]
1145 // CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
1146 // CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]*
1147 // CHECK: ret void
1148 
1149 // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
1150 //  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
1151 //  ...
1152 //  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
1153 //  *(Type<n>-1*)rhs[<n>-1]);
1154 // }
1155 // CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
1156 // t_var_lhs = (i{{[0-9]+}}*)lhs[0];
1157 // CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
1158 // CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
1159 // CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to i{{[0-9]+}}*
1160 // t_var_rhs = (i{{[0-9]+}}*)rhs[0];
1161 // CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
1162 // CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
1163 // CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to i{{[0-9]+}}*
1164 
1165 // var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
1166 // CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1
1167 // CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
1168 // CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_INT_TY]]*
1169 // var_rhs = (S<i{{[0-9]+}}>*)rhs[1];
1170 // CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
1171 // CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
1172 // CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_INT_TY]]*
1173 
1174 // var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
1175 // CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
1176 // CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
1177 // CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_INT_TY]]*
1178 // var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];
1179 // CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
1180 // CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
1181 // CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_INT_TY]]*
1182 
1183 // t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
1184 // CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3
1185 // CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
1186 // CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to i{{[0-9]+}}*
1187 // t_var1_rhs = (i{{[0-9]+}}*)rhs[3];
1188 // CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
1189 // CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
1190 // CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to i{{[0-9]+}}*
1191 
1192 // t_var_lhs += t_var_rhs;
1193 // CHECK: [[T_VAR_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_LHS]],
1194 // CHECK: [[T_VAR_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_RHS]],
1195 // CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_LHS_VAL]], [[T_VAR_RHS_VAL]]
1196 // CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_LHS]],
1197 
1198 // var_lhs = var_lhs.operator &(var_rhs);
1199 // CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_LHS]], [[S_INT_TY]]* dereferenceable(4) [[VAR_RHS]])
1200 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_LHS]] to i8*
1201 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
1202 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
1203 
1204 // var1_lhs = var1_lhs.operator &&(var1_rhs);
1205 // CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_LHS]])
1206 // CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
1207 // CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
1208 // CHECK: [[TRUE]]
1209 // CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_RHS]])
1210 // CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
1211 // CHECK: br label %[[END2]]
1212 // CHECK: [[END2]]
1213 // CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
1214 // CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
1215 // CHECK:  call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
1216 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_LHS]] to i8*
1217 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
1218 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
1219 
1220 // t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
1221 // CHECK: [[T_VAR1_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_LHS]],
1222 // CHECK: [[T_VAR1_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_RHS]],
1223 // CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_LHS_VAL]], [[T_VAR1_RHS_VAL]]
1224 // CHECK: br i1 [[CMP]]
1225 // CHECK: [[UP:%.+]] = phi i32
1226 // CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_LHS]],
1227 // CHECK: ret void
1228 
1229 #endif
1230 
1231