// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER
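
// This test checks LLVM IR generation for the OpenMP 'linear' clause on
// '#pragma omp for': each linear variable gets a private copy that is
// initialized to start + iteration * step and is updated by the step in
// every loop iteration.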

template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  S<T> &operator=(const S<T> &);
  operator T() { return T(); }
  ~S() {}
};

volatile int g = 1212;
float f;
char cnt;

// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[CAP_MAIN_TY:%.+]] = type { float**, i64* }
// CHECK: [[S_INT_TY:%.+]] = type { i32 }
// CHECK: [[CAP_TMAIN_TY:%.+]] = type { i32**, i32* }
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[F:@.+]] = global float 0.0
// CHECK-DAG: [[CNT:@.+]] = global i8 0
template <typename T>
T tmain() {
  S<T> test;
  T *pvar = &test.f;
  T lvar = T();
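  // No step is specified, so the linear step defaults to 1.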
#pragma omp parallel
#pragma omp for linear(pvar, lvar)
  for (int i = 0; i < 2; ++i) {
    ++pvar, ++lvar;
  }
  return T();
}

int main() {
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // LAMBDA-LABEL: @main
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
#pragma omp parallel
#pragma omp for linear(g:5)
  for (int i = 0; i < 2; ++i) {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_START_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: store %{{.+}}* [[ARG]], %{{.+}}** [[ARG_REF:%.+]],
    // LAMBDA: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // LAMBDA: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // LAMBDA: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
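    // The private copy of 'g' is initialized from the start value plus the
    // current iteration times the linear step (5); the loop body then adds 5.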
    // LAMBDA: [[VAL:%.+]] = load i32, i32* [[G_START_ADDR]]
    // LAMBDA: [[CNT:%.+]] = load i32, i32*
    // LAMBDA: [[MUL:%.+]] = mul nsw i32 [[CNT]], 5
    // LAMBDA: [[ADD:%.+]] = add nsw i32 [[VAL]], [[MUL]]
    // LAMBDA: store i32 [[ADD]], i32* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[VAL:%.+]] = load i32, i32* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[ADD:%.+]] = add nsw i32 [[VAL]], 5
    // LAMBDA: store i32 [[ADD]], i32* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g += 5;
    // LAMBDA: call i32 @__kmpc_cancel_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
#pragma omp parallel
#pragma omp for linear(g:5)
  for (int i = 0; i < 2; ++i) {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_START_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: store %{{.+}}* [[ARG]], %{{.+}}** [[ARG_REF:%.+]],
    // BLOCKS: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // BLOCKS: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // BLOCKS: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
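    // Same linear initialization as in the lambda version:
    // g_priv = g_start + cnt * 5, followed by the in-body increment by 5.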
    // BLOCKS: [[VAL:%.+]] = load i32, i32* [[G_START_ADDR]]
    // BLOCKS: [[CNT:%.+]] = load i32, i32*
    // BLOCKS: [[MUL:%.+]] = mul nsw i32 [[CNT]], 5
    // BLOCKS: [[ADD:%.+]] = add nsw i32 [[VAL]], [[MUL]]
    // BLOCKS: store i32 [[ADD]], i32* [[G_PRIVATE_ADDR]],
    // BLOCKS: [[VAL:%.+]] = load i32, i32* [[G_PRIVATE_ADDR]],
    // BLOCKS: [[ADD:%.+]] = add nsw i32 [[VAL]], 5
    // BLOCKS: store i32 [[ADD]], i32* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g += 5;
    // BLOCKS: call i32 @__kmpc_cancel_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    g = 1;
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
#else
  S<float> test;
  float *pvar = &test.f;
  long long lvar = 0;
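  // Both pvar and lvar are linear with an explicit step of 3.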
#pragma omp parallel
#pragma omp for linear(pvar, lvar : 3)
  for (int i = 0; i < 2; ++i) {
    pvar += 3, lvar += 3;
  }
  return tmain<int>();
#endif
}

// CHECK: define i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: %{{.+}} = bitcast [[CAP_MAIN_TY]]*
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[CAP_MAIN_TY]]*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: = call {{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call void [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret

// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, [[CAP_MAIN_TY]]* %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[PVAR_START:%.+]] = alloca float*,
// CHECK: [[LVAR_START:%.+]] = alloca i64,
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[PVAR_PRIV:%.+]] = alloca float*,
// CHECK: [[LVAR_PRIV:%.+]] = alloca i64,
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// Check for default initialization.
// CHECK: [[PVAR_PTR_REF:%.+]] = getelementptr inbounds [[CAP_MAIN_TY]], [[CAP_MAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[PVAR_REF:%.+]] = load float**, float*** [[PVAR_PTR_REF]],
// CHECK: [[PVAR_VAL:%.+]] = load float*, float** [[PVAR_REF]],
// CHECK: store float* [[PVAR_VAL]], float** [[PVAR_START]],
// CHECK: [[LVAR_PTR_REF:%.+]] = getelementptr inbounds [[CAP_MAIN_TY]], [[CAP_MAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 1
// CHECK: [[LVAR_REF:%.+]] = load i64*, i64** [[LVAR_PTR_REF]],
// CHECK: [[LVAR_VAL:%.+]] = load i64, i64* [[LVAR_REF]],
// CHECK: store i64 [[LVAR_VAL]], i64* [[LVAR_START]],
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID:%.+]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
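// Linear initialization: pvar_priv = pvar_start + cnt * 3 (as an element
// offset via GEP) and lvar_priv = lvar_start + sext(cnt * 3).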
// CHECK: [[PVAR_VAL:%.+]] = load float*, float** [[PVAR_START]],
// CHECK: [[CNT:%.+]] = load i32, i32*
// CHECK: [[MUL:%.+]] = mul nsw i32 [[CNT]], 3
// CHECK: [[IDX:%.+]] = sext i32 [[MUL]] to i64
// CHECK: [[PTR:%.+]] = getelementptr inbounds float, float* [[PVAR_VAL]], i64 [[IDX]]
// CHECK: store float* [[PTR]], float** [[PVAR_PRIV]],
// CHECK: [[LVAR_VAL:%.+]] = load i64, i64* [[LVAR_START]],
// CHECK: [[CNT:%.+]] = load i32, i32*
// CHECK: [[MUL:%.+]] = mul nsw i32 [[CNT]], 3
// CHECK: [[CONV:%.+]] = sext i32 [[MUL]] to i64
// CHECK: [[VAL:%.+]] = add nsw i64 [[LVAR_VAL]], [[CONV]]
// CHECK: store i64 [[VAL]], i64* [[LVAR_PRIV]],
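// Loop body updates: 'pvar += 3' lowers to a GEP by 3 elements and
// 'lvar += 3' to a plain 64-bit add.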
// CHECK: [[PVAR_VAL:%.+]] = load float*, float** [[PVAR_PRIV]]
// CHECK: [[PTR:%.+]] = getelementptr inbounds float, float* [[PVAR_VAL]], i64 3
// CHECK: store float* [[PTR]], float** [[PVAR_PRIV]],
// CHECK: [[LVAR_VAL:%.+]] = load i64, i64* [[LVAR_PRIV]],
// CHECK: [[ADD:%.+]] = add nsw i64 [[LVAR_VAL]], 3
// CHECK: store i64 [[ADD]], i64* [[LVAR_PRIV]],
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[CAP_TMAIN_TY]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, [[CAP_TMAIN_TY]]* %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[PVAR_START:%.+]] = alloca i32*,
// CHECK: [[LVAR_START:%.+]] = alloca i32,
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[PVAR_PRIV:%.+]] = alloca i32*,
// CHECK: [[LVAR_PRIV:%.+]] = alloca i32,
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// Check for default initialization.
// CHECK: [[PVAR_PTR_REF:%.+]] = getelementptr inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[PVAR_REF:%.+]] = load i32**, i32*** [[PVAR_PTR_REF]],
// CHECK: [[PVAR_VAL:%.+]] = load i32*, i32** [[PVAR_REF]],
// CHECK: store i32* [[PVAR_VAL]], i32** [[PVAR_START]],
// CHECK: [[LVAR_PTR_REF:%.+]] = getelementptr inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 1
// CHECK: [[LVAR_REF:%.+]] = load i32*, i32** [[LVAR_PTR_REF]],
// CHECK: [[LVAR_VAL:%.+]] = load i32, i32* [[LVAR_REF]],
// CHECK: store i32 [[LVAR_VAL]], i32* [[LVAR_START]],
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID:%.+]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
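// Linear initialization with the default step of 1, so 'cnt' is multiplied by 1.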
// CHECK: [[PVAR_VAL:%.+]] = load i32*, i32** [[PVAR_START]],
// CHECK: [[CNT:%.+]] = load i32, i32*
// CHECK: [[MUL:%.+]] = mul nsw i32 [[CNT]], 1
// CHECK: [[IDX:%.+]] = sext i32 [[MUL]] to i64
// CHECK: [[PTR:%.+]] = getelementptr inbounds i32, i32* [[PVAR_VAL]], i64 [[IDX]]
// CHECK: store i32* [[PTR]], i32** [[PVAR_PRIV]],
// CHECK: [[LVAR_VAL:%.+]] = load i32, i32* [[LVAR_START]],
// CHECK: [[CNT:%.+]] = load i32, i32*
// CHECK: [[MUL:%.+]] = mul nsw i32 [[CNT]], 1
// CHECK: [[VAL:%.+]] = add nsw i32 [[LVAR_VAL]], [[MUL]]
// CHECK: store i32 [[VAL]], i32* [[LVAR_PRIV]],
// CHECK: [[PVAR_VAL:%.+]] = load i32*, i32** [[PVAR_PRIV]]
// CHECK: [[PTR:%.+]] = getelementptr inbounds i32, i32* [[PVAR_VAL]], i32 1
// CHECK: store i32* [[PTR]], i32** [[PVAR_PRIV]],
// CHECK: [[LVAR_VAL:%.+]] = load i32, i32* [[LVAR_PRIV]],
// CHECK: [[ADD:%.+]] = add nsw i32 [[LVAR_VAL]], 1
// CHECK: store i32 [[ADD]], i32* [[LVAR_PRIV]],
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void
#endif