1 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64
2 // RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
3 // RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64
4 // RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32
5 // RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32
7 
8 // RUN: %clang_cc1  -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
9 // RUN: %clang_cc1  -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
10 // RUN: %clang_cc1  -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
11 // RUN: %clang_cc1  -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
12 // RUN: %clang_cc1  -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1  -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
14 // expected-no-diagnostics
15 #ifndef HEADER
16 #define HEADER
17 
18 template <class T>
19 struct S {
20   T f;
21   S(T a) : f(a) {}
22   S() : f() {}
23   operator T() { return T(); }
24   ~S() {}
25 };
26 
27 // CHECK: [[S_FLOAT_TY:%.+]] = type { float }
28 // CHECK: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
29 template <typename T>
30 T tmain() {
31   S<T> test;
32   T t_var = T();
33   T vec[] = {1, 2};
34   S<T> s_arr[] = {1, 2};
35   S<T> &var = test;
36   #pragma omp target
37   #pragma omp teams
38   #pragma omp distribute parallel for private(t_var, vec, s_arr, s_arr, var, var)
39   for (int i = 0; i < 2; ++i) {
40     vec[i] = t_var;
41     s_arr[i] = var;
42   }
43   return T();
44 }
45 
46 int main() {
47   static int svar;
48   volatile double g;
49   volatile double &g1 = g;
50 
51   #ifdef LAMBDA
52   // LAMBDA-LABEL: @main
53   // LAMBDA: call{{.*}} void [[OUTER_LAMBDA:@.+]](
54   [&]() {
55     static float sfvar;
56     // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
57     // LAMBDA: call i{{[0-9]+}} @__tgt_target_teams(
58     // LAMBDA: call void [[OFFLOADING_FUN:@.+]](
59 
60     // LAMBDA: define{{.+}} void [[OFFLOADING_FUN]]()
61     // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 0, {{.+}}* [[OMP_OUTLINED:@.+]] to {{.+}})
62     #pragma omp target
63     #pragma omp teams
64     #pragma omp distribute parallel for private(g, g1, svar, sfvar)
65     for (int i = 0; i < 2; ++i) {
66       // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
67       // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double,
68       // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = alloca double,
69       // LAMBDA: [[TMP_PRIVATE_ADDR:%.+]] = alloca double*,
70       // LAMBDA: [[SVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
71       // LAMBDA: [[SFVAR_PRIVATE_ADDR:%.+]] = alloca float,
72       // LAMBDA: store double* [[G1_PRIVATE_ADDR]], double** [[TMP_PRIVATE_ADDR]],
73       // LAMBDA: call {{.*}}void @__kmpc_for_static_init_4(
74       // LAMBDA: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED:@.+]] to {{.+}},
75       // LAMBDA: call {{.*}}void @__kmpc_for_static_fini(
76       // LAMBDA: ret void
77 
78       // LAMBDA: define{{.+}} void [[OMP_PARFOR_OUTLINED]](
79       // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double,
80       // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = alloca double,
81       // LAMBDA: [[TMP_PRIVATE_ADDR:%.+]] = alloca double*,
82       // LAMBDA: [[SVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
83       // LAMBDA: [[SFVAR_PRIVATE_ADDR:%.+]] = alloca float,
84 
85       g = 1;
86       g1 = 1;
87       svar = 3;
88       sfvar = 4.0;
89       // LAMBDA: store double* [[G1_PRIVATE_ADDR]], double** [[TMP_PRIVATE_ADDR]],
90       // LAMBDA: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
91       // LAMBDA: store i{{[0-9]+}} 3, i{{[0-9]+}}* [[SVAR_PRIVATE_ADDR]],
92       // LAMBDA: store float 4.0{{.+}}, float* [[SFVAR_PRIVATE_ADDR]],
93       // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
94       // LAMBDA: store double* [[G_PRIVATE_ADDR]], double** [[G_PRIVATE_ADDR_REF]],
95       // LAMBDA: [[TMP_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
96       // LAMBDA: [[G1_PRIVATE_ADDR_FROM_TMP:%.+]] = load double*, double** [[TMP_PRIVATE_ADDR]],
97       // LAMBDA: store double* [[G1_PRIVATE_ADDR_FROM_TMP]], double** [[TMP_PRIVATE_ADDR_REF]],
98       // LAMBDA: [[SVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
99       // LAMBDA: store i{{[0-9]+}}* [[SVAR_PRIVATE_ADDR]], i{{[0-9]+}}** [[SVAR_PRIVATE_ADDR_REF]]
100       // LAMBDA: [[SFVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 3
101       // LAMBDA: store float* [[SFVAR_PRIVATE_ADDR]], float** [[SFVAR_PRIVATE_ADDR_REF]]
102       // LAMBDA: call{{.*}} void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
103       // LAMBDA: call {{.*}}void @__kmpc_for_static_fini(
104       // LAMBDA: ret void
105       [&]() {
106 	// LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
107 	// LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
108 	g = 2;
109 	g1 = 2;
110 	svar = 4;
111 	sfvar = 8.0;
112 	// LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
113 	// LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
114 	// LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]]
115 	// LAMBDA: store double 2.0{{.+}}, double* [[G_REF]]
116 
117 	// LAMBDA: [[TMP_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
118 	// LAMBDA: [[G1_REF:%.+]] = load double*, double** [[TMP_PTR_REF]]
119 	// LAMBDA: store double 2.0{{.+}}, double* [[G1_REF]],
120 	// LAMBDA: [[SVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
121 	// LAMBDA: [[SVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SVAR_PTR_REF]]
122 	// LAMBDA: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SVAR_REF]]
123 	// LAMBDA: [[SFVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 3
124 	// LAMBDA: [[SFVAR_REF:%.+]] = load float*, float** [[SFVAR_PTR_REF]]
125 	// LAMBDA: store float 8.0{{.+}}, float* [[SFVAR_REF]]
126       }();
127     }
128   }();
129   return 0;
130   #else
131   S<float> test;
132   int t_var = 0;
133   int vec[] = {1, 2};
134   S<float> s_arr[] = {1, 2};
135   S<float> &var = test;
136 
137   #pragma omp target
138   #pragma omp teams
139   #pragma omp distribute parallel for private(t_var, vec, s_arr, s_arr, var, var, svar)
140   for (int i = 0; i < 2; ++i) {
141     vec[i] = t_var;
142     s_arr[i] = var;
143   }
144   return tmain<int>();
145   #endif
146 }
147 
148 // CHECK: define{{.*}} i{{[0-9]+}} @main()
149 // CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
150 // CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
151 // CHECK: call i{{[0-9]+}} @__tgt_target_teams(
152 // CHECK: call void [[OFFLOAD_FUN_0:@.+]](
153 
154 // CHECK: call {{.*}} [[S_FLOAT_TY_DEF_DESTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
155 // CHECK: ret
156 
157 // CHECK: define{{.+}} [[OFFLOAD_FUN_0]]()
158 // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_teams(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[OMP_OUTLINED_0:@.+]] to void
159 // CHECK: ret
160 //
161 // CHECK: define internal void [[OMP_OUTLINED_0]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
162 // CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
163 // CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
164 // CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]],
165 // CHECK-NOT: alloca [2 x [[S_FLOAT_TY]]],
166 // CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
167 // CHECK-NOT: alloca [[S_FLOAT_TY]],
168 // CHECK: [[S_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
169 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]
170 // CHECK-NOT: [[T_VAR_PRIV]]
171 // CHECK-NOT: [[VEC_PRIV]]
172 // this is the ctor loop
173 // CHECK: {{.+}}:
174 // CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_FLOAT_TY]]*
175 // CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]])
176 // CHECK-NOT: [[T_VAR_PRIV]]
177 // CHECK-NOT: [[VEC_PRIV]]
178 // CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
179 // CHECK: call void @__kmpc_for_static_init_4(
180 // CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_0:@.+]] to {{.+}},
181 // CHECK: call void @__kmpc_for_static_fini(
182 
183 // call destructors: var..
184 // CHECK-DAG: call {{.+}} [[S_FLOAT_TY_DEF_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
185 
186 // ..and s_arr
187 // CHECK: {{.+}}:
188 // CHECK: [[S_ARR_EL_PAST:%.+]] = phi [[S_FLOAT_TY]]*
189 // CHECK: [[S_ARR_PRIV_ITEM:%.+]] = getelementptr {{.+}}, {{.+}} [[S_ARR_EL_PAST]],
190 // CHECK: call {{.*}} [[S_FLOAT_TY_DEF_DESTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]])
191 
192 // CHECK: ret void
193 
194 // By OpenMP specifications, private applies to both distribute and parallel for.
195 // However, the support for 'private' of 'parallel' is only used when 'parallel'
196 // is found alone. Therefore we only have one 'private' support for 'parallel for'
197 // in combination
198 // CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_0]](
199 // CHECK: [[T_VAR_PRIV:%t_var+]] = alloca i{{[0-9]+}},
200 // CHECK: [[VEC_PRIV:%vec+]] = alloca [2 x i{{[0-9]+}}],
201 // CHECK: [[S_ARR_PRIV:%s_arr+]] = alloca [2 x [[S_FLOAT_TY]]],
202 // CHECK-NOT: alloca [2 x [[S_FLOAT_TY]]],
203 // CHECK: [[VAR_PRIV:%var+]] = alloca [[S_FLOAT_TY]],
204 // CHECK-NOT: alloca [[S_FLOAT_TY]],
205 // CHECK: [[S_VAR_PRIV:%svar+]] = alloca i{{[0-9]+}},
206 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]
207 // CHECK-NOT: [[T_VAR_PRIV]]
208 // CHECK-NOT: [[VEC_PRIV]]
209 // this is the ctor loop
210 // CHECK: {{.+}}:
211 // CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_FLOAT_TY]]*
212 // CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]])
213 // CHECK-NOT: [[T_VAR_PRIV]]
214 // CHECK-NOT: [[VEC_PRIV]]
215 // CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
216 // CHECK: call void @__kmpc_for_static_init_4(
217 // CHECK: call void @__kmpc_for_static_fini(
218 
219 // call destructors: var..
220 // CHECK-DAG: call {{.+}} [[S_FLOAT_TY_DEF_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
221 
222 // ..and s_arr
223 // CHECK: {{.+}}:
224 // CHECK: [[S_ARR_EL_PAST:%.+]] = phi [[S_FLOAT_TY]]*
225 // CHECK: [[S_ARR_PRIV_ITEM:%.+]] = getelementptr {{.+}}, {{.+}} [[S_ARR_EL_PAST]],
226 // CHECK: call {{.*}} [[S_FLOAT_TY_DEF_DESTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]])
227 
228 // CHECK: ret void
229 
230 // template tmain with S_INT_TY
231 // CHECK: define{{.*}} i{{[0-9]+}} [[TMAIN_INT:@.+]]()
232 // CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
233 // CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
234 // CHECK: call i{{[0-9]+}} @__tgt_target_teams(
235 // CHECK: call void [[OFFLOAD_FUN_1:@.+]](
236 // CHECK: call {{.*}} [[S_INT_TY_DEF_DESTR:@.+]]([[S_INT_TY]]* [[TEST]])
237 // CHECK: ret
238 
239 // CHECK: ret
240 
241 // CHECK: define internal void [[OFFLOAD_FUN_1]]()
242 // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_teams(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[OMP_OUTLINED_1:@.+]] to void
243 // CHECK: ret
244 //
245 // CHECK: define internal void [[OMP_OUTLINED_1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
246 // CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
247 // CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
248 // CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]],
249 // CHECK-NOT: alloca [2 x [[S_INT_TY]]],
250 // CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]],
251 // CHECK-NOT: alloca [[S_INT_TY]],
252 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]
253 // CHECK-NOT: [[T_VAR_PRIV]]
254 // CHECK-NOT: [[VEC_PRIV]]
255 // CHECK: {{.+}}:
256 // CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_INT_TY]]*
257 // CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]])
258 // CHECK-NOT: [[T_VAR_PRIV]]
259 // CHECK-NOT: [[VEC_PRIV]]
260 // CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]])
261 // CHECK: call void @__kmpc_for_static_init_4(
262 // CHECK: call{{.+}} @__kmpc_fork_call({{.+}}, {{.+}}, {{.+}}[[OMP_PARFOR_OUTLINED_1:@.+]] to {{.+}},
263 // CHECK: call void @__kmpc_for_static_fini(
264 // CHECK: ret void
265 
266 // CHECK: define{{.+}} void [[OMP_PARFOR_OUTLINED_1]](
267 // CHECK: [[T_VAR_PRIV:%t_var+]] = alloca i{{[0-9]+}},
268 // CHECK: [[VEC_PRIV:%vec+]] = alloca [2 x i{{[0-9]+}}],
269 // CHECK: [[S_ARR_PRIV:%s_arr+]] = alloca [2 x [[S_INT_TY]]],
270 // CHECK-NOT: alloca [2 x [[S_INT_TY]]],
271 // CHECK: [[VAR_PRIV:%var+]] = alloca [[S_INT_TY]],
272 // CHECK-NOT: alloca [[S_INT_TY]],
273 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]
274 // CHECK-NOT: [[T_VAR_PRIV]]
275 // CHECK-NOT: [[VEC_PRIV]]
276 // this is the ctor loop
277 // CHECK: {{.+}}:
278 // CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_INT_TY]]*
279 // CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]])
280 // CHECK-NOT: [[T_VAR_PRIV]]
281 // CHECK-NOT: [[VEC_PRIV]]
282 // CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]])
283 // CHECK: call void @__kmpc_for_static_init_4(
284 // CHECK: call void @__kmpc_for_static_fini(
285 
286 // call destructors: var..
287 // CHECK-DAG: call {{.+}} [[S_INT_TY_DEF_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
288 
289 // ..and s_arr
290 // CHECK: {{.+}}:
291 // CHECK: [[S_ARR_EL_PAST:%.+]] = phi [[S_INT_TY]]*
292 // CHECK: [[S_ARR_PRIV_ITEM:%.+]] = getelementptr {{.+}}, {{.+}} [[S_ARR_EL_PAST]],
293 // CHECK: call {{.*}} [[S_INT_TY_DEF_DESTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]])
294 
295 // CHECK: ret void
296 
297 #endif
298