1 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s
2 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
3 // RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
4 // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=TERM_DEBUG
5 // RUN: %clang_cc1 -main-file-name for_codegen.cpp %s -o - -emit-llvm -fprofile-instrument=clang -fprofile-instrument-path=for_codegen-test.profraw | FileCheck %s --check-prefix=PROF-INSTR-PATH
6 //
7 // expected-no-diagnostics
8 #ifndef HEADER
9 #define HEADER
10 // PROF-INSTR-PATH: constant [25 x i8] c"for_codegen-test.profraw\00"
11 
12 // CHECK: [[IDENT_T_TY:%.+]] = type { i32, i32, i32, i32, i8* }
13 // CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
14 // CHECK-DAG: [[LOOP_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 514, i32 0, i32 0, i8*
15 // CHECK-DAG: [[I:@.+]] = global i8 1,
16 // CHECK-DAG: [[J:@.+]] = global i8 2,
17 // CHECK-DAG: [[K:@.+]] = global i8 3,
18 
19 // CHECK-LABEL: loop_with_counter_collapse
void loop_with_counter_collapse() {
  // Collapsed 2-level nest where the inner loop's bounds depend on the outer
  // counter (j starts at i). The collapsed iteration space is computed with a
  // 64-bit induction variable, hence the _8 static-init entry point.
  // CHECK: call void @__kmpc_for_static_init_8(%ident_t* @
  // CHECK: call void @__kmpc_for_static_fini(%ident_t* @
  #pragma omp for collapse(2)
  for (int i = 0; i < 4; i++) {
    for (int j = i; j < 4; j++) {
    }
  }
}
29 // CHECK-LABEL: define {{.*void}} @{{.*}}without_schedule_clause{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void without_schedule_clause(float *a, float *b, float *c, float *d) {
// No schedule clause: the default static schedule is used (schedule id 34 in
// the init call below). 'nowait' removes the implicit barrier at the end of
// the worksharing region, which the negative barrier check at the bottom of
// this function verifies.
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]])
  #pragma omp for nowait
// CHECK: call void @__kmpc_for_static_init_4([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]], i32 34, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// UB = min(UB, GlobalUB)
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 4571423
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 4571423, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// Loop header
// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (int i = 33; i < 32000000; i += 7) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 7
// CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 33, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
// CHECK-NOT: !llvm.mem.parallel_loop_access
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]])
// CHECK-NOT: __kmpc_barrier
// CHECK: ret void
}
69 
70 // CHECK-LABEL: define {{.*void}} @{{.*}}static_not_chunked{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void static_not_chunked(float *a, float *b, float *c, float *d) {
// schedule(static) without a chunk: same schedule id (34) as the default
// case above, but without 'nowait' an implicit barrier is emitted after the
// region (checked at the bottom). The loop runs downward (i += -7), so the
// user counter is reconstructed as 32000000 - 7*IV.
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]])
  #pragma omp for schedule(static)
// CHECK: call void @__kmpc_for_static_init_4([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]], i32 34, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// UB = min(UB, GlobalUB)
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 4571423
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 4571423, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// Loop header
// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (int i = 32000000; i > 33; i += -7) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 7
// CHECK-NEXT: [[CALC_I_2:%.+]] = sub nsw i32 32000000, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
// CHECK-NOT: !llvm.mem.parallel_loop_access
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]])
// CHECK: call {{.+}} @__kmpc_barrier([[IDENT_T_TY]]* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
}
110 
111 // CHECK-LABEL: define {{.*void}} @{{.*}}static_chunked{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void static_chunked(float *a, float *b, float *c, float *d) {
// schedule(monotonic: static, 5): chunked static schedule. The schedule id
// below is 536870945 = 0x20000021, i.e. static-chunked (33) with the
// monotonic modifier bit (0x20000000) set. The counter is unsigned, hence
// the _4u init routine and unsigned (ugt/ule) comparisons. Chunking adds an
// outer loop that advances LB/UB by the stride after each chunk.
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]])
  #pragma omp for schedule(monotonic: static, 5)
// CHECK: call void @__kmpc_for_static_init_4u([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]], i32 536870945, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 5)
// UB = min(UB, GlobalUB)
// CHECK: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp ugt i32 [[UB]], 16908288
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 16908288, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]

// Outer loop header
// CHECK: [[O_IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[O_UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ule i32 [[O_IV]], [[O_UB]]
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp ule i32 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (unsigned i = 131071; i <= 2147483647; i += 127) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul i32 [[IV1_1]], 127
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 131071, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
// CHECK-NOT: !llvm.mem.parallel_loop_access
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// Update the counters, adding stride
// CHECK:  [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: [[ST:%.+]] = load i32, i32* [[OMP_ST]]
// CHECK-NEXT: [[ADD_LB:%.+]] = add i32 [[LB]], [[ST]]
// CHECK-NEXT: store i32 [[ADD_LB]], i32* [[OMP_LB]]
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[ST:%.+]] = load i32, i32* [[OMP_ST]]
// CHECK-NEXT: [[ADD_UB:%.+]] = add i32 [[UB]], [[ST]]
// CHECK-NEXT: store i32 [[ADD_UB]], i32* [[OMP_UB]]

// CHECK: [[O_LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]])
// CHECK: call {{.+}} @__kmpc_barrier([[IDENT_T_TY]]* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
}
170 
171 // CHECK-LABEL: define {{.*void}} @{{.*}}dynamic1{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void dynamic1(float *a, float *b, float *c, float *d) {
// schedule(nonmonotonic: dynamic): dynamic dispatch. Schedule id
// 1073741859 = 0x40000023, i.e. dynamic-chunked (35) with the nonmonotonic
// modifier bit (0x40000000) set. The 64-bit unsigned counter selects the
// _8u dispatch entry points. The body's store is expected to carry the
// parallel-loop-access annotation (positive check on the store below).
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]])
  #pragma omp for schedule(nonmonotonic: dynamic)
// CHECK: call void @__kmpc_dispatch_init_8u([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32 1073741859, i64 0, i64 16908287, i64 1, i64 1)
//
// CHECK: [[HASWORK:%.+]] = call i32 @__kmpc_dispatch_next_8u([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32* [[OMP_ISLAST:%[^,]+]], i64* [[OMP_LB:%[^,]+]], i64* [[OMP_UB:%[^,]+]], i64* [[OMP_ST:%[^,]+]])
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ne i32 [[HASWORK]], 0
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[LB:%.+]] = load i64, i64* [[OMP_LB]]
// CHECK-NEXT: store i64 [[LB]], i64* [[OMP_IV:[^,]+]]
// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]

// CHECK-NEXT: [[UB:%.+]] = load i64, i64* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp ule i64 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (unsigned long long i = 131071; i < 2147483647; i += 127) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul i64 [[IV1_1]], 127
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i64 131071, [[CALC_I_1]]
// CHECK-NEXT: store i64 [[CALC_I_2]], i64* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}!llvm.mem.parallel_loop_access
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add i64 [[IV1_2]], 1
// CHECK-NEXT: store i64 [[ADD1_2]], i64* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
// CHECK: call {{.+}} @__kmpc_barrier([[IDENT_T_TY]]* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
}
211 
212 // CHECK-LABEL: define {{.*void}} @{{.*}}guided7{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void guided7(float *a, float *b, float *c, float *d) {
// schedule(guided, 7): guided schedule, id 36, chunk 7. Lowered through the
// same dispatch_init/dispatch_next protocol as dynamic; the 64-bit unsigned
// counter selects the _8u entry points.
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]])
  #pragma omp for schedule(guided, 7)
// CHECK: call void @__kmpc_dispatch_init_8u([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32 36, i64 0, i64 16908287, i64 1, i64 7)
//
// CHECK: [[HASWORK:%.+]] = call i32 @__kmpc_dispatch_next_8u([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32* [[OMP_ISLAST:%[^,]+]], i64* [[OMP_LB:%[^,]+]], i64* [[OMP_UB:%[^,]+]], i64* [[OMP_ST:%[^,]+]])
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ne i32 [[HASWORK]], 0
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[LB:%.+]] = load i64, i64* [[OMP_LB]]
// CHECK-NEXT: store i64 [[LB]], i64* [[OMP_IV:[^,]+]]
// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]

// CHECK-NEXT: [[UB:%.+]] = load i64, i64* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp ule i64 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (unsigned long long i = 131071; i < 2147483647; i += 127) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul i64 [[IV1_1]], 127
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i64 131071, [[CALC_I_1]]
// CHECK-NEXT: store i64 [[CALC_I_2]], i64* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}!llvm.mem.parallel_loop_access
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add i64 [[IV1_2]], 1
// CHECK-NEXT: store i64 [[ADD1_2]], i64* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
// CHECK: call {{.+}} @__kmpc_barrier([[IDENT_T_TY]]* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
}
252 
253 // CHECK-LABEL: define {{.*void}} @{{.*}}test_auto{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void test_auto(float *a, float *b, float *c, float *d) {
  unsigned int x = 0;
  unsigned int y = 0;
// schedule(auto) collapse(2): auto maps to schedule id 38 and, like the
// dynamic schedules, is lowered through dispatch_init/dispatch_next. The
// outer loop's lower bound comes from 'y', so the collapsed trip count is
// not a compile-time constant: the upper bound handed to dispatch_init is a
// runtime value ([[LAST_ITER]] below is an SSA value, not a constant).
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]])
  #pragma omp for schedule(auto) collapse(2)
// CHECK: call void @__kmpc_dispatch_init_8([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32 38, i64 0, i64 [[LAST_ITER:%[^,]+]], i64 1, i64 1)
//
// CHECK: [[HASWORK:%.+]] = call i32 @__kmpc_dispatch_next_8([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32* [[OMP_ISLAST:%[^,]+]], i64* [[OMP_LB:%[^,]+]], i64* [[OMP_UB:%[^,]+]], i64* [[OMP_ST:%[^,]+]])
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ne i32 [[HASWORK]], 0
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[LB:%.+]] = load i64, i64* [[OMP_LB]]
// CHECK-NEXT: store i64 [[LB]], i64* [[OMP_IV:[^,]+]]
// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]

// CHECK-NEXT: [[UB:%.+]] = load i64, i64* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i64 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
// FIXME: When the iteration count of some nested loop is not a known constant,
// we should pre-calculate it, like we do for the total number of iterations!
  for (char i = static_cast<char>(y); i <= '9'; ++i)
    for (x = 11; x > 0; --x) {
// CHECK: [[LOOP1_BODY]]
// Start of body: indices are calculated from IV:
// CHECK: store i8 {{%[^,]+}}, i8* {{%[^,]+}}
// CHECK: store i32 {{%[^,]+}}, i32* {{%[^,]+}}
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
// CHECK-NOT: !llvm.mem.parallel_loop_access
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i64 [[IV1_2]], 1
// CHECK-NEXT: store i64 [[ADD1_2]], i64* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
// CHECK: call {{.+}} @__kmpc_barrier([[IDENT_T_TY]]* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
}
297 
298 // CHECK-LABEL: define {{.*void}} @{{.*}}runtime{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void runtime(float *a, float *b, float *c, float *d) {
  int x = 0;
// schedule(runtime) collapse(2): schedule id 37; the actual schedule is
// chosen by the runtime, so dispatch_init/dispatch_next are used. The nest
// has a constant trip count: 10 outer chars ('0'..'9') x 20 inner values
// (-10..9) = 200 iterations, hence the constant 0..199 bounds below.
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]])
  #pragma omp for collapse(2) schedule(runtime)
// CHECK: call void @__kmpc_dispatch_init_4([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32 37, i32 0, i32 199, i32 1, i32 1)
//
// CHECK: [[HASWORK:%.+]] = call i32 @__kmpc_dispatch_next_4([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32* [[OMP_ISLAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]])
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ne i32 [[HASWORK]], 0
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]

// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (unsigned char i = '0' ; i <= '9'; ++i)
    for (x = -10; x < 10; ++x) {
// CHECK: [[LOOP1_BODY]]
// Start of body: indices are calculated from IV:
// CHECK: store i8 {{%[^,]+}}, i8* {{%[^,]+}}
// CHECK: store i32 {{%[^,]+}}, i32* {{%[^,]+}}
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
// CHECK-NOT: !llvm.mem.parallel_loop_access
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
// CHECK: call {{.+}} @__kmpc_barrier([[IDENT_T_TY]]* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
// CHECK: ret void
}
339 
340 // CHECK-LABEL: test_precond
void test_precond() {
  // The loop's lower bound ('a') is a runtime value, so codegen emits a
  // precondition: the captured initial value is sign-extended, compared
  // against the upper bound (10), and the whole worksharing region is
  // skipped when the loop would execute zero iterations.
  // CHECK: [[A_ADDR:%.+]] = alloca i8,
  // CHECK: [[I_ADDR:%.+]] = alloca i8,
  // CHECK: [[CAP:%.+]] = alloca i8,
  char a = 0;
  // CHECK: store i8 0,
  // CHECK: store i32
  // CHECK: store i8
  // CHECK: [[A:%.+]] = load i8, i8* [[CAP]],
  // CHECK: [[CONV:%.+]] = sext i8 [[A]] to i32
  // CHECK: [[CMP:%.+]] = icmp slt i32 [[CONV]], 10
  // CHECK: br i1 [[CMP]], label %[[PRECOND_THEN:[^,]+]], label %[[PRECOND_END:[^,]+]]
  // CHECK: [[PRECOND_THEN]]
  // CHECK: call void @__kmpc_for_static_init_4
#pragma omp for
  for(char i = a; i < 10; ++i);
  // CHECK: call void @__kmpc_for_static_fini
  // CHECK: [[PRECOND_END]]
}
360 
361 // TERM_DEBUG-LABEL: foo
362 int foo() {return 0;};
363 
364 // TERM_DEBUG-LABEL: parallel_for
void parallel_for(float *a) {
// Exercises debug locations and exception cleanup for a statically chunked
// worksharing loop inside a parallel region.
// NOTE(review): the debug-location checks after this function use
// @LINE-relative offsets that count back to the 'omp for' pragma below.
// Do not add or remove lines between that pragma and those checks.
#pragma omp parallel
#pragma omp for schedule(static, 5)
  // TERM_DEBUG-NOT: __kmpc_global_thread_num
  // TERM_DEBUG:     call void @__kmpc_for_static_init_4u({{.+}}), !dbg [[DBG_LOC_START:![0-9]+]]
  // TERM_DEBUG:     invoke i32 {{.*}}foo{{.*}}()
  // TERM_DEBUG:     unwind label %[[TERM_LPAD:.+]],
  // TERM_DEBUG-NOT: __kmpc_global_thread_num
  // TERM_DEBUG:     call void @__kmpc_for_static_fini({{.+}}), !dbg [[DBG_LOC_END:![0-9]+]]
  // TERM_DEBUG:     call {{.+}} @__kmpc_barrier({{.+}}), !dbg [[DBG_LOC_CANCEL:![0-9]+]]
  // TERM_DEBUG:     [[TERM_LPAD]]
  // TERM_DEBUG:     call void @__clang_call_terminate
  // TERM_DEBUG:     unreachable
  for (unsigned i = 131071; i <= 2147483647; i += 127)
    a[i] += foo();
}
381 // Check source line corresponds to "#pragma omp for schedule(static, 5)" above:
382 // TERM_DEBUG-DAG: [[DBG_LOC_START]] = !DILocation(line: [[@LINE-15]],
383 // TERM_DEBUG-DAG: [[DBG_LOC_END]] = !DILocation(line: [[@LINE-16]],
384 // TERM_DEBUG-DAG: [[DBG_LOC_CANCEL]] = !DILocation(line: [[@LINE-17]],
385 
386 char i = 1, j = 2, k = 3;
387 // CHECK-LABEL: for_with_global_lcv
void for_with_global_lcv() {
// Globals (i, j) and a reference bound to a global (cnt) used as loop
// control variables must be privatized: each loop stores to a private
// alloca ([[I_ADDR]]/[[J_ADDR]]) and the negative checks verify the global
// itself is never accessed inside the region; 'k' receives the private copy.
// CHECK: alloca i8,
// CHECK: [[I_ADDR:%.+]] = alloca i8,
// CHECK: alloca i8,
// CHECK: [[J_ADDR:%.+]] = alloca i8,

// CHECK: call void @__kmpc_for_static_init_4(
// CHECK-NOT: [[I]]
// CHECK: store i8 %{{.+}}, i8* [[I_ADDR]]
// CHECK-NOT: [[I]]
// CHECK: [[I_VAL:%.+]] = load i8, i8* [[I_ADDR]],
// CHECK-NOT: [[I]]
// CHECK: store i8 [[I_VAL]], i8* [[K]]
// CHECK-NOT: [[I]]
// CHECK: call void @__kmpc_for_static_fini(
// CHECK: call void @__kmpc_barrier(
#pragma omp for
  for (i = 0; i < 2; ++i) {
    k = i;
  }
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK-NOT: [[J]]
// CHECK: store i8 %{{.+}}, i8* [[J_ADDR]]
// CHECK-NOT: [[J]]
// CHECK: [[J_VAL:%.+]] = load i8, i8* [[J_ADDR]],
// CHECK-NOT: [[J]]
// CHECK: store i8 [[J_VAL]], i8* [[K]]
// CHECK-NOT: [[J]]
// CHECK: call void @__kmpc_for_static_fini(
#pragma omp for collapse(2)
  for (int i = 0; i < 2; ++i)
  for (j = 0; j < 2; ++j) {
    k = i;
    k = j;
  }
  char &cnt = i;
#pragma omp for
  for (cnt = 0; cnt < 2; ++cnt)
    k = cnt;
}
428 
429 // CHECK-LABEL: for_with_references
void for_with_references() {
// A reference used as the loop control variable is privatized as a plain
// char ([[CNT_PRIV]]); the negative check verifies the reference itself is
// never loaded inside the worksharing region.
// CHECK: [[I:%.+]] = alloca i8,
// CHECK: [[CNT:%.+]] = alloca i8*,
// CHECK: [[CNT_PRIV:%.+]] = alloca i8,
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK-NOT: load i8, i8* [[CNT]],
// CHECK: call void @__kmpc_for_static_fini(
  char i = 0;
  char &cnt = i;
#pragma omp for
  for (cnt = 0; cnt < 2; ++cnt)
    k = cnt;
}
443 
// Wrapper around bool with an implicit conversion back to bool. operator<
// over It (below) returns this type, so the loop condition in loop_with_It
// has class type convertible to bool rather than bool itself.
struct Bool {
  const bool b;

  // Store the flag; it is immutable after construction.
  Bool(bool value) : b(value) {}

  // Allow direct use in boolean contexts (e.g. a loop condition).
  operator bool() const { return b; }
};
449 
// Minimal random-access-iterator-like wrapper around a raw pointer, used as
// the loop variable type in loop_with_It / loop_with_It_plus. The copy
// constructor, converting constructor, and both assignment operators are
// declared but not defined: this file is only compiled to IR, so the
// declarations alone are what loop codegen needs to reference.
template <typename T>
struct It {
  T *p;

  It() : p(0) {}
  It(const It &, int = 0) ;
  template <typename U>
  It(U &, int = 0) ;
  It &operator=(const It &);
  It &operator=(It &);
  ~It() {}

  It(T *ptr) : p(ptr) {}

  // Expose the wrapped pointer (mutable and const flavors).
  operator T *&() { return p; }
  operator T *() const { return p; }
  T *operator->() const { return p; }

  // Pointer-style navigation used by the OpenMP loops below.
  It &operator++() { p = p + 1; return *this; }
  It &operator--() { p = p - 1; return *this; }
  It &operator+=(unsigned count) { p = p + count; return *this; }
  It &operator-=(unsigned count) { p = p - count; return *this; }
};
473 
// Free arithmetic/comparison operators over It<T>.
// NOTE(review): It<T> declares no 'difference_type' member, so the three
// arithmetic overloads below can never be instantiated; they appear to
// exist only so overload resolution considers (and discards) them.
// Presumably intentional -- confirm before "fixing".
template <typename T>
It<T> operator+(It<T> a, typename It<T>::difference_type n) { return a.p + n; }

template <typename T>
It<T> operator+(typename It<T>::difference_type n, It<T> a) { return a.p + n; }

template <typename T>
It<T> operator-(It<T> a, typename It<T>::difference_type n) { return a.p - n; }

// The comparison returns Bool (convertible to bool) rather than bool, so
// loop conditions using it have class type.
typedef Bool BoolType;

template <typename T>
BoolType operator<(It<T> a, It<T> b) { return a.p < b.p; }
487 
void loop_with_It(It<char> begin, It<char> end) {
// Worksharing loop whose control variable is a class type with iterator
// operations; the checks after this function verify it is lowered with the
// signed 64-bit static-init entry point.
#pragma omp for
  for (It<char> it = begin; it < end; ++it) {
    *it = 0;
  }
}
494 
495 // CHECK-LABEL: loop_with_It
496 // CHECK: call i32 @__kmpc_global_thread_num(
497 // CHECK: call void @__kmpc_for_static_init_8(
498 // CHECK: call void @__kmpc_for_static_fini(
499 
void loop_with_It_plus(It<char> begin, It<char> end) {
// Same as loop_with_It, but the increment uses compound assignment with an
// unsigned step (it += 1u) instead of operator++; still recognized as a
// canonical loop (checks after the function).
#pragma omp for
  for (It<char> it = begin; it < end; it+=1u) {
    *it = 0;
  }
}
506 
507 // CHECK-LABEL: loop_with_It_plus
508 // CHECK: call i32 @__kmpc_global_thread_num(
509 // CHECK: call void @__kmpc_for_static_init_8(
510 // CHECK: call void @__kmpc_for_static_fini(
511 
void loop_with_stmt_expr() {
// Loop init, condition bound, and step all come from GNU statement
// expressions; the loop must still be accepted as canonical and lowered
// with the 32-bit static-init entry point (checks after the function).
#pragma omp for
  for (int i = __extension__({float b = 0;b; }); i < __extension__({double c = 1;c; }); i += __extension__({char d = 1; d; }))
    ;
}
517 // CHECK-LABEL: loop_with_stmt_expr
518 // CHECK: call i32 @__kmpc_global_thread_num(
519 // CHECK: call void @__kmpc_for_static_init_4(
520 // CHECK: call void @__kmpc_for_static_fini(
521 
522 
523 // CHECK-LABEL: fint
524 // CHECK: call {{.*}}i32 {{.*}}ftemplate
525 // CHECK: ret i32
526 
527 // CHECK: load i16, i16*
528 // CHECK: store i16 %
529 // CHECK: call void {{.+}}@__kmpc_fork_call(
530 // CHECK: call void @__kmpc_for_static_init_4(
// 'parallel for' with a non-constant chunk size ('aa', a short): the checks
// above the template verify the chunk value is loaded/stored and the region
// is outlined through __kmpc_fork_call before reaching static init.
// Instantiated with T = int by fint below.
template <typename T>
T ftemplate() {
  short aa = 0;

#pragma omp parallel for schedule(static, aa)
  for (int i = 0; i < 100; i++) {
  }
  return T();
}
540 
541 int fint(void) { return ftemplate<int>(); }
542 
543 #endif // HEADER
544