// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -g -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fexceptions -fcxx-exceptions -gline-tables-only -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=TERM_DEBUG
// REQUIRES: x86-registered-target
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

long long get_val() { return 0; }
double *g_ptr;

// CHECK-LABEL: define {{.*void}} @{{.*}}simple{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void simple(float *a, float *b, float *c, float *d) {
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: [[K0:%.+]] = call {{.*}}i64 @{{.*}}get_val
// CHECK-NEXT: store i64 [[K0]], i64* [[K_VAR:%[^,]+]]
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: store i32 12, i32* [[LIN_VAR:%[^,]+]]
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: store i32 -1, i32* [[A:%.+]],
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: store i32 -1, i32* [[R:%[^,]+]],
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
  #pragma omp parallel for simd
// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 5
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 5, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV:%[^,]+]],
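// The loop below runs 6 iterations (i = 3, 8, 13, 18, 23, 28), so the IV ranges over 0..5 and the worksharing upper bound is clamped to 5 above.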

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[SIMPLE_LOOP1_BODY:.+]], label %[[SIMPLE_LOOP1_END:[^,]+]]
  for (int i = 3; i < 32; i += 5) {
// CHECK: [[SIMPLE_LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// CHECK: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 5
// CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 3, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// br label %{{.+}}, !llvm.loop !{{.+}}
  }
// CHECK: [[SIMPLE_LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})

  long long k = get_val();

  #pragma omp parallel for simd linear(k : 3) schedule(dynamic)
// CHECK: [[K0LOAD:%.+]] = load i64, i64* [[K_VAR:%[^,]+]]
// CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]]

// CHECK: call void @__kmpc_dispatch_init_4(%ident_t* {{.+}}, i32 %{{.+}}, i32 35, i32 0, i32 8, i32 1, i32 1)
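// Schedule kind 35 corresponds to the dynamic schedule requested above; the dispatched IV range 0..8 covers the 9 iterations of the loop below (i = 10 down to 2).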
// CHECK: [[NEXT:%.+]] = call i32 @__kmpc_dispatch_next_4(%ident_t* {{.+}}, i32 %{{.+}}, i32* %{{.+}}, i32* [[LB:%.+]], i32* [[UB:%.+]], i32* %{{.+}})
// CHECK: [[COND:%.+]] = icmp ne i32 [[NEXT]], 0
// CHECK: br i1 [[COND]], label %[[CONT:.+]], label %[[END:.+]]
// CHECK: [[CONT]]
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV2:%[^,]+]],

// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[CMP2:%.+]] = icmp sle i32 [[IV2]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP2_BODY:.+]], label %[[SIMPLE_LOOP2_END:[^,]+]]
  for (int i = 10; i > 1; i--) {
// CHECK: [[SIMPLE_LOOP2_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// FIXME: Why was the following "mul 1" not constant folded?
// CHECK-NEXT: [[IV2_1:%.+]] = mul nsw i32 [[IV2_0]], 1
// CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV2_1]]
// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
//
// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV2_2]], 3
// CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64
// CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]]
// Update of the privatized version of the linear variable.
// CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]]
    a[k]++;
    k = k + 3;
// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV2_2]], 1
// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP2_ID]]
  }
// CHECK: [[SIMPLE_LOOP2_END]]
//
// Update the linear vars after the loop, as the loop was operating on private copies.
// CHECK: [[LIN0_2:%.+]] = load i64, i64* [[LIN0]]
// CHECK-NEXT: [[LIN_ADD2:%.+]] = add nsw i64 [[LIN0_2]], 27
// CHECK-NEXT: store i64 [[LIN_ADD2]], i64* %{{.+}}
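// 27 == 9 iterations * linear step 3 (i goes 10, 9, ..., 2), added to the captured start value of k.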
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})

  int lin = 12;
  #pragma omp parallel for simd linear(lin : get_val()), linear(g_ptr)

// Init linear private var.
// CHECK: [[LIN_VAR:%.+]] = load i32*, i32** %
// CHECK-NEXT: [[LIN_LOAD:%.+]] = load i32, i32* [[LIN_VAR]]
// CHECK-NEXT: store i32 [[LIN_LOAD]], i32* [[LIN_START:%[^,]+]]
// Remember linear step.
// CHECK: [[CALL_VAL:%.+]] = invoke
// CHECK: store i64 [[CALL_VAL]], i64* [[LIN_STEP:%[^,]+]]

// CHECK: [[GLIN_VAR:%.+]] = load double**, double*** %
// CHECK-NEXT: [[GLIN_LOAD:%.+]] = load double*, double** [[GLIN_VAR]]
// CHECK-NEXT: store double* [[GLIN_LOAD]], double** [[GLIN_START:%[^,]+]]

// CHECK: call void @__kmpc_for_static_init_8u(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp ugt i64 [[UB_VAL]], 3
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 3, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV3:%[^,]+]],
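// The loop below runs 4 iterations (it = 2000, 1600, 1200, 800), so the unsigned IV ranges over 0..3 and the upper bound is clamped to 3 above.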

// CHECK: [[IV3:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[CMP3:%.+]] = icmp ule i64 [[IV3]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP3]], label %[[SIMPLE_LOOP3_BODY:.+]], label %[[SIMPLE_LOOP3_END:[^,]+]]
  for (unsigned long long it = 2000; it >= 600; it-=400) {
// CHECK: [[SIMPLE_LOOP3_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV3_0:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul i64 [[IV3_0]], 400
// CHECK-NEXT: [[LC_IT_2:%.+]] = sub i64 2000, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
//
// Linear start and step are used to calculate the current value of the linear variable.
// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NOT: store i32 {{.+}}, i32* [[LIN_VAR]],{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[IV3_1:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[MUL:%.+]] = mul i64 [[IV3_1]], 1
// CHECK: [[GEP:%.+]] = getelementptr{{.*}}[[GLINSTART]]
// CHECK-NEXT: store double* [[GEP]], double** [[G_PTR_CUR:%[^,]+]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
    *g_ptr++ = 0.0;
// CHECK: [[GEP_VAL:%.+]] = load double{{.*}}[[G_PTR_CUR]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: store double{{.*}}[[GEP_VAL]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
    a[it + lin]++;
// CHECK: [[FLT_INC:%.+]] = fadd float
// CHECK-NEXT: store float [[FLT_INC]],{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: [[IV3_2:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[ADD3_2:%.+]] = add i64 [[IV3_2]], 1
// CHECK-NEXT: store i64 [[ADD3_2]], i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
  }
// CHECK: [[SIMPLE_LOOP3_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
//
// Linear start and step are used to calculate the final values of the linear variables.
// CHECK: [[LIN_VAR:%.+]] = load i32*, i32** %
// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]
// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]
// CHECK: store i32 {{.+}}, i32* [[LIN_VAR]],
// CHECK: [[GLIN_VAR:%.+]] = load double**, double*** %
// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]
// CHECK: store double* {{.*}}[[GLIN_VAR]]
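// I.e. lin = start + 4 * step and g_ptr = start + 4, since the loop has 4 iterations.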
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})

  #pragma omp parallel for simd
// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 3
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 3, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV4:%[^,]+]],
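// The loop below runs 4 iterations (it = 6, 10, 14, 18; 'it-=-4' steps by +4), so the IV ranges over 0..3 and the upper bound is clamped to 3 above.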

// CHECK: [[IV4:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]
// CHECK-NEXT: [[CMP4:%.+]] = icmp sle i32 [[IV4]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP4]], label %[[SIMPLE_LOOP4_BODY:.+]], label %[[SIMPLE_LOOP4_END:[^,]+]]
  for (short it = 6; it <= 20; it-=-4) {
// CHECK: [[SIMPLE_LOOP4_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV4_0:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i32 [[IV4_0]], 4
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i32 6, [[LC_IT_1]]
// CHECK-NEXT: [[LC_IT_3:%.+]] = trunc i32 [[LC_IT_2]] to i16
// CHECK-NEXT: store i16 [[LC_IT_3]], i16* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]

// CHECK: [[IV4_2:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]
// CHECK-NEXT: [[ADD4_2:%.+]] = add nsw i32 [[IV4_2]], 1
// CHECK-NEXT: store i32 [[ADD4_2]], i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]
  }
// CHECK: [[SIMPLE_LOOP4_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})

  #pragma omp parallel for simd
// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 25
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 25, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV5:%[^,]+]],
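// The loop below runs 26 iterations (it = 'z' (122) down to 'a' (97)), so the IV ranges over 0..25 and the upper bound is clamped to 25 above.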

// CHECK: [[IV5:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]
// CHECK-NEXT: [[CMP5:%.+]] = icmp sle i32 [[IV5]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP5]], label %[[SIMPLE_LOOP5_BODY:.+]], label %[[SIMPLE_LOOP5_END:[^,]+]]
  for (unsigned char it = 'z'; it >= 'a'; it+=-1) {
// CHECK: [[SIMPLE_LOOP5_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV5_0:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]
// CHECK-NEXT: [[IV5_1:%.+]] = mul nsw i32 [[IV5_0]], 1
// CHECK-NEXT: [[LC_IT_1:%.+]] = sub nsw i32 122, [[IV5_1]]
// CHECK-NEXT: [[LC_IT_2:%.+]] = trunc i32 [[LC_IT_1]] to i8
// CHECK-NEXT: store i8 [[LC_IT_2]], i8* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]

// CHECK: [[IV5_2:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]
// CHECK-NEXT: [[ADD5_2:%.+]] = add nsw i32 [[IV5_2]], 1
// CHECK-NEXT: store i32 [[ADD5_2]], i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]
  }
// CHECK: [[SIMPLE_LOOP5_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})

// CHECK-NOT: mul i32 %{{.+}}, 10
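// The loop below has zero iterations (100 < 10 is already false on entry), so no 'mul ..., 10' recomputation of i should show up in this region of the output.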
  #pragma omp parallel for simd
  for (unsigned i=100; i<10; i+=10) {
  }

  int A;
  {
  A = -1;
  #pragma omp parallel for simd lastprivate(A)
// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 6
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 6, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV7:%[^,]+]],
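// The loop below runs 7 iterations (i = -10, -7, -4, -1, 2, 5, 8), so the IV ranges over 0..6 and the upper bound is clamped to 6 above.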

// CHECK: br label %[[SIMD_LOOP7_COND:[^,]+]]
// CHECK: [[SIMD_LOOP7_COND]]
// CHECK-NEXT: [[IV7:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID:[0-9]+]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[CMP7:%.+]] = icmp sle i64 [[IV7]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP7]], label %[[SIMPLE_LOOP7_BODY:.+]], label %[[SIMPLE_LOOP7_END:[^,]+]]
  for (long long i = -10; i < 10; i += 3) {
// CHECK: [[SIMPLE_LOOP7_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV7_0:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV7_0]], 3
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[CONV:%.+]] = trunc i64 [[LC_VAL]] to i32
// CHECK-NEXT: store i32 [[CONV]], i32* [[A_PRIV:%[^,]+]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
    A = i;
// CHECK: [[IV7_2:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[ADD7_2:%.+]] = add nsw i64 [[IV7_2]], 1
// CHECK-NEXT: store i64 [[ADD7_2]], i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
  }
// CHECK: [[SIMPLE_LOOP7_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: load i32, i32*
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: br i1 %{{.+}}, label
// CHECK: [[A_PRIV_VAL:%.+]] = load i32, i32* [[A_PRIV]],
// CHECK-NEXT: store i32 [[A_PRIV_VAL]], i32* %{{.+}},
// CHECK-NEXT: br label
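// The copy-out of the lastprivate A above happens only in the thread that executed the last iteration, guarded by the is-last flag that is loaded and tested above.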
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
  }
  int R;
  {
  R = -1;
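// The private copy of R used for reduction(*) is initialized to 1, the multiplicative identity: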
// CHECK: store i32 1, i32* [[R_PRIV:%[^,]+]],
  #pragma omp parallel for simd reduction(*:R)
// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 6
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 6, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV8:%[^,]+]],

// CHECK: br label %[[SIMD_LOOP8_COND:[^,]+]]
// CHECK: [[SIMD_LOOP8_COND]]
// CHECK-NEXT: [[IV8:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID:[0-9]+]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
// CHECK-NEXT: [[CMP8:%.+]] = icmp sle i64 [[IV8]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP8]], label %[[SIMPLE_LOOP8_BODY:.+]], label %[[SIMPLE_LOOP8_END:[^,]+]]
  for (long long i = -10; i < 10; i += 3) {
// CHECK: [[SIMPLE_LOOP8_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV8_0:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV8_0]], 3
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
// CHECK: store i32 %{{.+}}, i32* [[R_PRIV]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
    R *= i;
// CHECK: [[IV8_2:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
// CHECK-NEXT: [[ADD8_2:%.+]] = add nsw i64 [[IV8_2]], 1
// CHECK-NEXT: store i64 [[ADD8_2]], i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
  }
// CHECK: [[SIMPLE_LOOP8_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call i32 @__kmpc_reduce_nowait(
// CHECK: [[R_PRIV_VAL:%.+]] = load i32, i32* [[R_PRIV]],
// CHECK: [[RED:%.+]] = mul nsw i32 %{{.+}}, [[R_PRIV_VAL]]
// CHECK-NEXT: store i32 [[RED]], i32* %{{.+}},
// CHECK-NEXT: call void @__kmpc_end_reduce_nowait(
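// Non-atomic reduction path: the private partial product is multiplied into the original R before the reduction is finalized.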
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
  }
}

template <class T, unsigned K> T tfoo(T a) { return a + K; }

template <typename T, unsigned N>
int templ1(T a, T *z) {
  #pragma omp parallel for simd collapse(N)
  for (int i = 0; i < N * 2; i++) {
    for (long long j = 0; j < (N + N + N + N); j += 2) {
      z[i + j] = a + tfoo<T, N>(i + j);
    }
  }
  return 0;
}

// Instantiation of templ1<float,2>
// CHECK-LABEL: define {{.*i32}} @{{.*}}templ1{{.*}}(float {{.+}}, float* {{.+}})
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
void inst_templ1() {
  float a;
  float z[100];
  templ1<float,2> (a, z);
}


typedef int MyIdx;

class IterDouble {
  double *Ptr;
public:
  IterDouble operator++ () const {
    IterDouble n;
    n.Ptr = Ptr + 1;
    return n;
  }
  bool operator < (const IterDouble &that) const {
    return Ptr < that.Ptr;
  }
  double & operator *() const {
    return *Ptr;
  }
  MyIdx operator - (const IterDouble &that) const {
    return (MyIdx) (Ptr - that.Ptr);
  }
  IterDouble operator + (int Delta) {
    IterDouble re;
    re.Ptr = Ptr + Delta;
    return re;
  }

  ///~IterDouble() {}
};

// CHECK-LABEL: define {{.*void}} @{{.*}}iter_simple{{.*}}
void iter_simple(IterDouble ia, IterDouble ib, IterDouble ic) {
//
// Calculate the number of iterations before the loop body.
// CHECK: [[DIFF1:%.+]] = invoke {{.*}}i32 @{{.*}}IterDouble{{.*}}
// CHECK: [[DIFF2:%.+]] = sub nsw i32 [[DIFF1]], 1
// CHECK-NEXT: [[DIFF3:%.+]] = add nsw i32 [[DIFF2]], 1
// CHECK-NEXT: [[DIFF4:%.+]] = sdiv i32 [[DIFF3]], 1
// CHECK-NEXT: [[DIFF5:%.+]] = sub nsw i32 [[DIFF4]], 1
// CHECK-NEXT: store i32 [[DIFF5]], i32* [[OMP_LAST_IT:%[^,]+]]{{.+}}
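// I.e. OMP_LAST_IT = (((ib - ia) - 1) + 1) / 1 - 1, the zero-based index of the last iteration.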
  #pragma omp parallel for simd

// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK-DAG: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK-DAG: [[OMP_LAST_IT_VAL:%.+]] = load i32, i32* [[OMP_LAST_IT]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], [[OMP_LAST_IT_VAL]]
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: [[OMP_LAST_IT_VAL:%.+]] = load i32, i32* [[OMP_LAST_IT]],
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ [[OMP_LAST_IT_VAL]], %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[IT_OMP_IV:%[^,]+]],

// CHECK: [[IV:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}} !llvm.mem.parallel_loop_access ![[ITER_LOOP_ID:[0-9]+]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[IT_BODY:[^,]+]], label %[[IT_END:[^,]+]]
  for (IterDouble i = ia; i < ib; ++i) {
// CHECK: [[IT_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// Call of operator+ (i, IV).
// CHECK: {{%.+}} = invoke {{.+}} @{{.*}}IterDouble{{.*}}
// ... loop body ...
   *i = *ic * 0.5;
// Float multiply and save result.
// CHECK: [[MULR:%.+]] = fmul double {{%.+}}, 5.000000e-01
// CHECK-NEXT: invoke {{.+}} @{{.*}}IterDouble{{.*}}
// CHECK: store double [[MULR:%.+]], double* [[RESULT_ADDR:%.+]], !llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
   ++ic;
//
// CHECK: [[IV2:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// CHECK-NEXT: [[ADD2:%.+]] = add nsw i32 [[IV2]], 1
// CHECK-NEXT: store i32 [[ADD2]], i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// br label %{{.*}}, !llvm.loop ![[ITER_LOOP_ID]]
  }
// CHECK: [[IT_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: ret void
}


// CHECK-LABEL: define {{.*void}} @{{.*}}collapsed{{.*}}
void collapsed(float *a, float *b, float *c, float *d) {
  int i; // outer loop counter
  unsigned j; // middle loop counter, leads to unsigned icmp in loop header.
  // k declared in the loop init below
  short l; // inner loop counter
// CHECK: call void @__kmpc_for_static_init_4u(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp ugt i32 [[UB_VAL]], 119
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 119, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV:%[^,]+]],
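// The collapsed iteration space has 2 * 3 * 4 * 5 = 120 iterations, so the IV ranges over 0..119 (hence the clamp to 119 above).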
//
  #pragma omp parallel for simd collapse(4)

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[CMP:%.+]] = icmp ule i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[COLL1_BODY:[^,]+]], label %[[COLL1_END:[^,]+]]
  for (i = 1; i < 3; i++) // 2 iterations
    for (j = 2u; j < 5u; j++) // 3 iterations
      for (int k = 3; k <= 6; k++) // 4 iterations
        for (l = 4; l < 9; ++l) // 5 iterations
        {
// CHECK: [[COLL1_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// Calculation of the loop counters' values.
// CHECK: [[CALC_I_1:%.+]] = udiv i32 [[IV1]], 60
// CHECK-NEXT: [[CALC_I_1_MUL1:%.+]] = mul i32 [[CALC_I_1]], 1
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 1, [[CALC_I_1_MUL1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[CALC_J_1:%.+]] = udiv i32 [[IV1_2]], 20
// CHECK-NEXT: [[CALC_J_2:%.+]] = urem i32 [[CALC_J_1]], 3
// CHECK-NEXT: [[CALC_J_2_MUL1:%.+]] = mul i32 [[CALC_J_2]], 1
// CHECK-NEXT: [[CALC_J_3:%.+]] = add i32 2, [[CALC_J_2_MUL1]]
// CHECK-NEXT: store i32 [[CALC_J_3]], i32* [[LC_J:.+]]
// CHECK: [[IV1_3:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[CALC_K_1:%.+]] = udiv i32 [[IV1_3]], 5
// CHECK-NEXT: [[CALC_K_2:%.+]] = urem i32 [[CALC_K_1]], 4
// CHECK-NEXT: [[CALC_K_2_MUL1:%.+]] = mul i32 [[CALC_K_2]], 1
// CHECK-NEXT: [[CALC_K_3:%.+]] = add i32 3, [[CALC_K_2_MUL1]]
// CHECK-NEXT: store i32 [[CALC_K_3]], i32* [[LC_K:.+]]
// CHECK: [[IV1_4:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[CALC_L_1:%.+]] = urem i32 [[IV1_4]], 5
// CHECK-NEXT: [[CALC_L_1_MUL1:%.+]] = mul i32 [[CALC_L_1]], 1
// CHECK-NEXT: [[CALC_L_2:%.+]] = add i32 4, [[CALC_L_1_MUL1]]
// CHECK-NEXT: [[CALC_L_3:%.+]] = trunc i32 [[CALC_L_2]] to i16
// CHECK-NEXT: store i16 [[CALC_L_3]], i16* [[LC_L:.+]]
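// I.e. i = 1 + IV/60, j = 2 + (IV/20)%3, k = 3 + (IV/5)%4, l = 4 + IV%5.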
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
    float res = b[j] * c[k];
    a[i] = res * d[l];
// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[ADD2:%.+]] = add i32 [[IV2]], 1
// CHECK-NEXT: store i32 [[ADD2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// br label %{{[^,]+}}, !llvm.loop ![[COLL1_LOOP_ID]]
// CHECK: [[COLL1_END]]
  }
// i,j,l are updated; k is not updated.
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: store i32 3, i32* [[I:%[^,]+]]
// CHECK: store i32 5, i32* [[I:%[^,]+]]
// CHECK: store i16 9, i16* [[I:%[^,]+]]
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: ret void
}

extern char foo();
extern double globalfloat;

// CHECK-LABEL: define {{.*void}} @{{.*}}widened{{.*}}
void widened(float *a, float *b, float *c, float *d) {
  int i; // outer loop counter
  short j; // inner loop counter
  globalfloat = 1.0;
  int localint = 1;
// CHECK: store double {{.+}}, double* [[GLOBALFLOAT:@.+]]
// Counter is widened to 64 bits.
// CHECK:     [[MUL:%.+]] = mul nsw i64 2, %{{.+}}
// CHECK-NEXT: [[SUB:%.+]] = sub nsw i64 [[MUL]], 1
// CHECK-NEXT: store i64 [[SUB]], i64* [[OMP_LAST_IT:%[^,]+]],
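// I.e. OMP_LAST_IT = 2 * foo() - 1 (2 outer iterations times foo() inner iterations), computed as a 64-bit value.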
// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK-DAG: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK-DAG: [[OMP_LAST_IT_VAL:%.+]] = load i64, i64* [[OMP_LAST_IT]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], [[OMP_LAST_IT_VAL]]
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: [[OMP_LAST_IT_VAL:%.+]] = load i64, i64* [[OMP_LAST_IT]],
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ [[OMP_LAST_IT_VAL]], %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV:%[^,]+]],
//
  #pragma omp parallel for simd collapse(2) private(globalfloat, localint)

// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i64 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[WIDE1_BODY:[^,]+]], label %[[WIDE1_END:[^,]+]]
  for (i = 1; i < 3; i++) // 2 iterations
    for (j = 0; j < foo(); j++) // foo() iterations
  {
// CHECK: [[WIDE1_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// Calculation of the loop counters' values...
// CHECK: store i32 {{[^,]+}}, i32* [[LC_I:.+]]
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// CHECK: store i16 {{[^,]+}}, i16* [[LC_J:.+]]
// ... loop body ...
//
// Here we expect a store into the private double var, not the global one.
// CHECK-NOT: store double {{.+}}, double* [[GLOBALFLOAT]]
    globalfloat = (float)j/i;
    float res = b[j] * c[j];
// Store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
    a[i] = res * d[i];
// Then there's a store into the private var localint:
// CHECK: store i32 {{.+}}, i32* [[LOCALINT:%[^,]+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
    localint = (int)j;
// CHECK: [[IV2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// CHECK-NEXT: [[ADD2:%.+]] = add nsw i64 [[IV2]], 1
// CHECK-NEXT: store i64 [[ADD2]], i64* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
//
// br label %{{[^,]+}}, !llvm.loop ![[WIDE1_LOOP_ID]]
// CHECK: [[WIDE1_END]]
  }
// i,j are updated.
// CHECK: store i32 3, i32* [[I:%[^,]+]]
// CHECK: store i16
//
// Here we expect a store into the original localint, not its privatized version.
// CHECK-NOT: store i32 {{.+}}, i32* [[LOCALINT]]
  localint = (int)j;
// CHECK: ret void
}

// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 15
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 15, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[T1_OMP_IV:%[^,]+]],
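// These are the checks for the outlined body of the templ1<float,2> instantiation: the collapsed trip count is 4 * 4 = 16 (i = 0..3; j = 0, 2, 4, 6), so the last IV is 15.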

// ...
// CHECK: [[IV:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID:[0-9]+]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: [[CMP1:%.+]] = icmp sle i64 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP1]], label %[[T1_BODY:.+]], label %[[T1_END:[^,]+]]
// CHECK: [[T1_BODY]]
// Updates of the loop counters i and j:
// CHECK: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: [[I_1:%.+]] = sdiv i64 [[IV1]], 4
// CHECK-NEXT: [[I_1_MUL1:%.+]] = mul nsw i64 [[I_1]], 1
// CHECK-NEXT: [[I_1_ADD0:%.+]] = add nsw i64 0, [[I_1_MUL1]]
// CHECK-NEXT: [[I_2:%.+]] = trunc i64 [[I_1_ADD0]] to i32
// CHECK-NEXT: store i32 [[I_2]], i32* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK: [[IV2:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: [[J_1:%.+]] = srem i64 [[IV2]], 4
// CHECK-NEXT: [[J_2:%.+]] = mul nsw i64 [[J_1]], 2
// CHECK-NEXT: [[J_2_ADD0:%.+]] = add nsw i64 0, [[J_2]]
// CHECK-NEXT: store i64 [[J_2_ADD0]], i64* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
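// I.e. i = IV / 4 and j = (IV % 4) * 2.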
// simd.for.inc:
// CHECK: [[IV3:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: [[INC:%.+]] = add nsw i64 [[IV3]], 1
// CHECK-NEXT: store i64 [[INC]], i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: br label {{%.+}}
// CHECK: [[T1_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: ret void
//
// TERM_DEBUG-LABEL: bar
int bar() {return 0;};

// TERM_DEBUG-LABEL: parallel_simd
void parallel_simd(float *a) {
#pragma omp parallel for simd
  // TERM_DEBUG-NOT: __kmpc_global_thread_num
  // TERM_DEBUG:     invoke i32 {{.*}}bar{{.*}}()
  // TERM_DEBUG:     unwind label %[[TERM_LPAD:.+]],
  // TERM_DEBUG-NOT: __kmpc_global_thread_num
  // TERM_DEBUG:     [[TERM_LPAD]]
  // TERM_DEBUG:     call void @__clang_call_terminate
  // TERM_DEBUG:     unreachable
  for (unsigned i = 131071; i <= 2147483647; i += 127)
    a[i] += bar();
}
// TERM_DEBUG: !{{[0-9]+}} = !DILocation(line: [[@LINE-11]],
#endif // HEADER