// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
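// This test checks the IR generated for the OpenMP 'lastprivate' clause on
// '#pragma omp sections': every listed variable gets a private copy inside
// the outlined region, and the thread that executes the lexically last
// section copies its private value back to the original variable.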
#ifndef HEADER
#define HEADER

template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  S<T> &operator=(const S<T> &);
  operator T() { return T(); }
  ~S() {}
};
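// S intentionally has a user-declared copy assignment operator and a
// non-trivial destructor: copy-back of lastprivate class-type variables must
// go through operator= rather than memcpy, and the private copies must be
// destroyed when the region ends.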

volatile int g = 1212;

// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[S_INT_TY:%.+]] = type { i32 }
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[SINGLE_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 322, i32 0, i32 0, i8*
// CHECK-DAG: [[SECTIONS_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 194, i32 0, i32 0, i8*
// CHECK-DAG: [[X:@.+]] = global double 0.0
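// The second i32 in these ident_t constants encodes the barrier kind; 66,
// 322, and 194 are assumed to be KMP_IDENT_KMPC combined with the implicit
// barrier flags for parallel, single, and sections regions in the runtime's
// kmp.h encoding.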
template <typename T>
T tmain() {
  S<T> test;
  T t_var = T();
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var(3);
#pragma omp parallel
#pragma omp sections lastprivate(t_var, vec, s_arr, var)
  {
    vec[0] = t_var;
#pragma omp section
    s_arr[0] = var;
  }
  return T();
}
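// A rough sketch of the lowering the checks below expect (illustrative
// pseudocode only, not the emitted IR; all names here are made up):
//
//   T t_var_priv;  // default-initialized private copies
//   S<T> var_priv;
//   ...
//   __kmpc_for_static_init_4(&loc, gtid, /*kmp_sch_static*/ 34, &is_last,
//                            &lb, &ub, &stride, 1, 1);
//   for (id = lb; id <= ub; ++id)
//     switch (id) { /* one case per section, using the private copies */ }
//   __kmpc_for_static_fini(&loc, gtid);
//   if (is_last) {        // the thread that ran the last section
//     t_var = t_var_priv; // copies the privates back to the originals
//     var = var_priv;     // (via S::operator= for class types)
//   }
//   __kmpc_cancel_barrier(&sections_loc, gtid);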

namespace A {
double x;
}
namespace B {
using A::x;
}
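// B::x names the same variable as A::x, so 'lastprivate(A::x, B::x)' below
// must create a single private double, not two (checked in MAIN_MICROTASK1).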

int main() {
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // LAMBDA-LABEL: @main
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp sections lastprivate(g)
  {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // LAMBDA: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // LAMBDA: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // LAMBDA: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g = 1;
    // Check for final copying of private values back to original vars.
    // LAMBDA: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // LAMBDA: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // LAMBDA: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // LAMBDA: [[LAST_THEN]]
    // Actual copying.

    // original g = private_g;
    // LAMBDA: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],
    // LAMBDA: br label %[[LAST_DONE]]
    // LAMBDA: [[LAST_DONE]]
    // LAMBDA: call i32 @__kmpc_cancel_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
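    // Note that the copy-back is a volatile store because 'g' itself is
    // volatile; the inner lambda reaches g through a capture that was
    // rewired to the private copy above.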
#pragma omp section
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp sections lastprivate(g)
  {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // BLOCKS: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // BLOCKS: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // BLOCKS: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[^[:word:]]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[^[:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
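    // The negative checks on [[G]] above verify that the block literal
    // captures the address of the private copy rather than the global 'g'.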
    // BLOCKS: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g = 1;
    // Check for final copying of private values back to original vars.
    // BLOCKS: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // BLOCKS: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // BLOCKS: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // BLOCKS: [[LAST_THEN]]
    // Actual copying.

    // original g = private_g;
    // BLOCKS: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],
    // BLOCKS: br label %[[LAST_DONE]]
    // BLOCKS: [[LAST_DONE]]
    // BLOCKS: call i32 @__kmpc_cancel_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
#pragma omp section
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      // BLOCKS-NOT: [[G]]{{[^[:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[^[:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
#else
  S<float> test;
  int t_var = 0;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3);
#pragma omp parallel
#pragma omp sections lastprivate(t_var, vec, s_arr, var)
  {
    {
    vec[0] = t_var;
    s_arr[0] = var;
    }
  }
#pragma omp parallel
#pragma omp sections lastprivate(A::x, B::x)
  {
    A::x++;
#pragma omp section
    ;
  }
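  // The first 'sections' region above has only the implicit section, so it
  // is lowered as a 'single' region (see MAIN_MICROTASK below); this second
  // region has two sections and uses the static worksharing scheme.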
  return tmain<int>();
#endif
}

// CHECK: define i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK1:@.+]] to void
// CHECK: = call {{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call void [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret

// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK-NOT: alloca i{{[0-9]+}},
// CHECK-NOT: alloca [2 x i{{[0-9]+}}],
// CHECK-NOT: alloca [2 x [[S_FLOAT_TY]]],
// CHECK-NOT: alloca [[S_FLOAT_TY]],
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call i32 @__kmpc_single(

// <Skip loop body>

// CHECK-NOT: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*

// CHECK: call void @__kmpc_end_single(

// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[SINGLE_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void
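// A 'sections' region with a single section is emitted as a 'single'-style
// region: the one executing thread is trivially the last, so no separate
// private copies are created (hence the negative alloca checks above) and
// only the __kmpc_single/__kmpc_end_single calls plus barriers are emitted.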

//
// CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK: [[X_PRIV:%.+]] = alloca double,
// CHECK-NOT: alloca double

// Check for default initialization.
// CHECK-NOT: [[X_PRIV]]

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
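// Schedule kind 34 in the __kmpc_for_static_init_4 calls is assumed to be
// kmp_sch_static in the runtime's sched_type encoding: 'sections' is lowered
// as a statically scheduled loop over section ids, bracketed by the
// init/fini calls.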
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original x = private_x;
// CHECK: [[X_VAL:%.+]] = load double, double* [[X_PRIV]],
// CHECK: store double [[X_VAL]], double* [[X]],
// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[SECTIONS_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]],
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** %
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** %
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %

// Check for default initialization.
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_INT_TY]]*
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]])
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original t_var = private_t_var;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_REF]],

// original vec[] = private_vec[];
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[VEC_DEST]], i8* [[VEC_SRC]],

// original s_arr[] = private_s_arr[];
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]] to [[S_INT_TY]]*
// CHECK: [[S_ARR_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %[[S_ARR_BODY_DONE]], label %[[S_ARR_BODY]]
// CHECK: [[S_ARR_BODY_DONE]]
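// Arrays of trivially copyable elements (vec) are copied back with a single
// llvm.memcpy, while arrays of class type (s_arr) require an element-wise
// loop calling S::operator=, as the checks above show.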

// original var = private_var;
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* {{.*}} [[VAR_PRIV]])
// CHECK: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[SECTIONS_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void
#endif