// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK4

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// expected-no-diagnostics
14 #ifndef HEADER
15 #define HEADER
16
17 struct St {
18 int a, b;
StSt19 St() : a(0), b(0) {}
StSt20 St(const St &st) : a(st.a + st.b), b(0) {}
~StSt21 ~St() {}
22 };
23
24 volatile int g = 1212;
25 volatile int &g1 = g;
26
27 template <class T>
28 struct S {
29 T f;
SS30 S(T a) : f(a + g) {}
SS31 S() : f(g) {}
SS32 S(const S &s, St t = St()) : f(s.f + t.a) {}
operator TS33 operator T() { return T(); }
~SS34 ~S() {}
35 };
36
37
38 template <typename T>
tmain()39 T tmain() {
40 S<T> test;
41 T t_var = T();
42 T vec[] = {1, 2};
43 S<T> s_arr[] = {1, 2};
44 S<T> &var = test;
45 #pragma omp parallel
46 #pragma omp for firstprivate(t_var, vec, s_arr, var)
47 for (int i = 0; i < 2; ++i) {
48 vec[i] = t_var;
49 s_arr[i] = var;
50 }
51 return T();
52 }
53
54 S<float> test;
55 int t_var = 333;
56 int vec[] = {1, 2};
57 S<float> s_arr[] = {1, 2};
58 S<float> var(3);
59
main()60 int main() {
61 static int sivar;
62 #ifdef LAMBDA
63 [&]() {
64 #pragma omp parallel
65 #pragma omp for firstprivate(g, g1, sivar)
66 for (int i = 0; i < 2; ++i) {
67 // Skip temp vars for loop
68
69
70
71
72 g = 1;
73 g1 = 2;
74 sivar = 3;
75
76 [&]() {
77 g = 4;
78 g1 = 5;
79 sivar = 6;
80
81 }();
82 }
83 }();
84 return 0;
85 #elif defined(BLOCKS)
86 ^{
87 #pragma omp parallel
88 #pragma omp for firstprivate(g, g1, sivar)
89 for (int i = 0; i < 2; ++i) {
90 // Skip temp vars for loop
91
92
93
94
95 g = 1;
96 g1 =1;
97 sivar = 2;
98 ^{
99 g = 2;
100 g1 = 2;
101 sivar = 4;
102 }();
103 }
104 }();
105 return 0;
106 #else
107 #pragma omp for firstprivate(t_var, vec, s_arr, var, sivar)
108 for (int i = 0; i < 2; ++i) {
109 vec[i] = t_var;
110 s_arr[i] = var;
111 sivar += i;
112 }
113 return tmain<int>();
114 #endif
115 }
116
// Skip temp vars for loop

// firstprivate t_var(t_var)

// firstprivate vec(vec)

// firstprivate s_arr(s_arr)

// firstprivate var(var)

// firstprivate (sivar)

// Synchronization for initialization.


// ~(firstprivate var), ~(firstprivate s_arr)



// Skip temp vars for loop


// firstprivate vec(vec)

// firstprivate s_arr(s_arr)

// firstprivate var(var)

// No synchronization for initialization.


// ~(firstprivate var), ~(firstprivate s_arr)
149 #endif
150
151 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init
152 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" {
153 // CHECK1-NEXT: entry:
154 // CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) @test)
155 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
156 // CHECK1-NEXT: ret void
157 //
158 //
159 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
160 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
161 // CHECK1-NEXT: entry:
162 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
163 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
164 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
165 // CHECK1-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
166 // CHECK1-NEXT: ret void
167 //
168 //
169 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
170 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
171 // CHECK1-NEXT: entry:
172 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
173 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
174 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
175 // CHECK1-NEXT: call void @_ZN1SIfED2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
176 // CHECK1-NEXT: ret void
177 //
178 //
179 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
180 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
181 // CHECK1-NEXT: entry:
182 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
183 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
184 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
185 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
186 // CHECK1-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
187 // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
188 // CHECK1-NEXT: store float [[CONV]], float* [[F]], align 4
189 // CHECK1-NEXT: ret void
190 //
191 //
192 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
193 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
194 // CHECK1-NEXT: entry:
195 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
196 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
197 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
198 // CHECK1-NEXT: ret void
199 //
200 //
201 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
202 // CHECK1-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
203 // CHECK1-NEXT: entry:
204 // CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float noundef 1.000000e+00)
205 // CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float noundef 2.000000e+00)
206 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
207 // CHECK1-NEXT: ret void
208 //
209 //
210 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
211 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
212 // CHECK1-NEXT: entry:
213 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
214 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
215 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
216 // CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4
217 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
218 // CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
219 // CHECK1-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]])
220 // CHECK1-NEXT: ret void
221 //
222 //
223 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
224 // CHECK1-SAME: (i8* noundef [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
225 // CHECK1-NEXT: entry:
226 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
227 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
228 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
229 // CHECK1: arraydestroy.body:
230 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 1, i64 0), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
231 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
232 // CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
233 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
234 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
235 // CHECK1: arraydestroy.done1:
236 // CHECK1-NEXT: ret void
237 //
238 //
239 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
240 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
241 // CHECK1-NEXT: entry:
242 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
243 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
244 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
245 // CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4
246 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
247 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
248 // CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
249 // CHECK1-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
250 // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
251 // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
252 // CHECK1-NEXT: store float [[ADD]], float* [[F]], align 4
253 // CHECK1-NEXT: ret void
254 //
255 //
256 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
257 // CHECK1-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
258 // CHECK1-NEXT: entry:
259 // CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) @var, float noundef 3.000000e+00)
260 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
261 // CHECK1-NEXT: ret void
262 //
263 //
264 // CHECK1-LABEL: define {{[^@]+}}@main
265 // CHECK1-SAME: () #[[ATTR3:[0-9]+]] {
266 // CHECK1-NEXT: entry:
267 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
268 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
269 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
270 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
271 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
272 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
273 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
274 // CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
275 // CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
276 // CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
277 // CHECK1-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
278 // CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S:%.*]], align 4
279 // CHECK1-NEXT: [[AGG_TMP2:%.*]] = alloca [[STRUCT_ST]], align 4
280 // CHECK1-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
281 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
282 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
283 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
284 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
285 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
286 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
287 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
288 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* @t_var, align 4
289 // CHECK1-NEXT: store i32 [[TMP1]], i32* [[T_VAR]], align 4
290 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
291 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 bitcast ([2 x i32]* @vec to i8*), i64 8, i1 false)
292 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
293 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
294 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP3]]
295 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
296 // CHECK1: omp.arraycpy.body:
297 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
298 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
299 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP]])
300 // CHECK1-NEXT: call void @_ZN1SIfEC1ERKS0_2St(%struct.S* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], %struct.St* noundef [[AGG_TMP]])
301 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP]]) #[[ATTR2]]
302 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
303 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
304 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP3]]
305 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
306 // CHECK1: omp.arraycpy.done1:
307 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP2]])
308 // CHECK1-NEXT: call void @_ZN1SIfEC1ERKS0_2St(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]], %struct.S* noundef nonnull align 4 dereferenceable(4) @var, %struct.St* noundef [[AGG_TMP2]])
309 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP2]]) #[[ATTR2]]
310 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
311 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[SIVAR]], align 4
312 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
313 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
314 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1
315 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
316 // CHECK1: cond.true:
317 // CHECK1-NEXT: br label [[COND_END:%.*]]
318 // CHECK1: cond.false:
319 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
320 // CHECK1-NEXT: br label [[COND_END]]
321 // CHECK1: cond.end:
322 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
323 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
324 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
325 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
326 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
327 // CHECK1: omp.inner.for.cond:
328 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
329 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
330 // CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
331 // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
332 // CHECK1: omp.inner.for.cond.cleanup:
333 // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
334 // CHECK1: omp.inner.for.body:
335 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
336 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
337 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
338 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
339 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[T_VAR]], align 4
340 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4
341 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
342 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]]
343 // CHECK1-NEXT: store i32 [[TMP11]], i32* [[ARRAYIDX]], align 4
344 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
345 // CHECK1-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP13]] to i64
346 // CHECK1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 [[IDXPROM4]]
347 // CHECK1-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[ARRAYIDX5]] to i8*
348 // CHECK1-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[VAR]] to i8*
349 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP14]], i8* align 4 [[TMP15]], i64 4, i1 false)
350 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
351 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[SIVAR]], align 4
352 // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP16]]
353 // CHECK1-NEXT: store i32 [[ADD6]], i32* [[SIVAR]], align 4
354 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
355 // CHECK1: omp.body.continue:
356 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
357 // CHECK1: omp.inner.for.inc:
358 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
359 // CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP18]], 1
360 // CHECK1-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
361 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
362 // CHECK1: omp.inner.for.end:
363 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
364 // CHECK1: omp.loop.exit:
365 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
366 // CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
367 // CHECK1-NEXT: [[ARRAY_BEGIN8:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
368 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN8]], i64 2
369 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
370 // CHECK1: arraydestroy.body:
371 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP19]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
372 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
373 // CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
374 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN8]]
375 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE9:%.*]], label [[ARRAYDESTROY_BODY]]
376 // CHECK1: arraydestroy.done9:
377 // CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP0]])
378 // CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
379 // CHECK1-NEXT: ret i32 [[CALL]]
380 //
381 //
382 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StC1Ev
383 // CHECK1-SAME: (%struct.St* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
384 // CHECK1-NEXT: entry:
385 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
386 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
387 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
388 // CHECK1-NEXT: call void @_ZN2StC2Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[THIS1]])
389 // CHECK1-NEXT: ret void
390 //
391 //
392 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1ERKS0_2St
393 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
394 // CHECK1-NEXT: entry:
395 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
396 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S*, align 8
397 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
398 // CHECK1-NEXT: store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
399 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
400 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
401 // CHECK1-NEXT: call void @_ZN1SIfEC2ERKS0_2St(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[TMP0]], %struct.St* noundef [[T]])
402 // CHECK1-NEXT: ret void
403 //
404 //
405 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StD1Ev
406 // CHECK1-SAME: (%struct.St* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
407 // CHECK1-NEXT: entry:
408 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
409 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
410 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
411 // CHECK1-NEXT: call void @_ZN2StD2Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR2]]
412 // CHECK1-NEXT: ret void
413 //
414 //
415 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
416 // CHECK1-SAME: () #[[ATTR6:[0-9]+]] {
417 // CHECK1-NEXT: entry:
418 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
419 // CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
420 // CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
421 // CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
422 // CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
423 // CHECK1-NEXT: [[VAR:%.*]] = alloca %struct.S.0*, align 8
424 // CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]])
425 // CHECK1-NEXT: store i32 0, i32* [[T_VAR]], align 4
426 // CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
427 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
428 // CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
429 // CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef 1)
430 // CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
431 // CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
432 // CHECK1-NEXT: store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 8
433 // CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8
434 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, [2 x i32]*, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[T_VAR]], [2 x i32]* [[VEC]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[TMP1]])
435 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
436 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
437 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
438 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
439 // CHECK1: arraydestroy.body:
440 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP2]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
441 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
442 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
443 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
444 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
445 // CHECK1: arraydestroy.done1:
446 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR2]]
447 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[RETVAL]], align 4
448 // CHECK1-NEXT: ret i32 [[TMP3]]
449 //
450 //
451 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StC2Ev
452 // CHECK1-SAME: (%struct.St* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
453 // CHECK1-NEXT: entry:
454 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
455 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
456 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
457 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[THIS1]], i32 0, i32 0
458 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4
459 // CHECK1-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[THIS1]], i32 0, i32 1
460 // CHECK1-NEXT: store i32 0, i32* [[B]], align 4
461 // CHECK1-NEXT: ret void
462 //
463 //
464 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2ERKS0_2St
465 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
466 // CHECK1-NEXT: entry:
467 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
468 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S*, align 8
469 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
470 // CHECK1-NEXT: store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
471 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
472 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
473 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
474 // CHECK1-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 0
475 // CHECK1-NEXT: [[TMP1:%.*]] = load float, float* [[F2]], align 4
476 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[T]], i32 0, i32 0
477 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4
478 // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to float
479 // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP1]], [[CONV]]
480 // CHECK1-NEXT: store float [[ADD]], float* [[F]], align 4
481 // CHECK1-NEXT: ret void
482 //
483 //
484 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StD2Ev
485 // CHECK1-SAME: (%struct.St* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
486 // CHECK1-NEXT: entry:
487 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
488 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
489 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
490 // CHECK1-NEXT: ret void
491 //
492 //
493 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
494 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
495 // CHECK1-NEXT: entry:
496 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
497 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
498 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
499 // CHECK1-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
500 // CHECK1-NEXT: ret void
501 //
502 //
503 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
504 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
505 // CHECK1-NEXT: entry:
506 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
507 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
508 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
509 // CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
510 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
511 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
512 // CHECK1-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]])
513 // CHECK1-NEXT: ret void
514 //
515 //
516 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
517 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x i32]* noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S.0]* noundef nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR7:[0-9]+]] {
518 // CHECK1-NEXT: entry:
519 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
520 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
521 // CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
522 // CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
523 // CHECK1-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
524 // CHECK1-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
525 // CHECK1-NEXT: [[TMP:%.*]] = alloca %struct.S.0*, align 8
526 // CHECK1-NEXT: [[_TMP1:%.*]] = alloca %struct.S.0*, align 8
527 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
528 // CHECK1-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
529 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
530 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
531 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
532 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
533 // CHECK1-NEXT: [[T_VAR3:%.*]] = alloca i32, align 4
534 // CHECK1-NEXT: [[VEC4:%.*]] = alloca [2 x i32], align 4
535 // CHECK1-NEXT: [[S_ARR5:%.*]] = alloca [2 x %struct.S.0], align 4
536 // CHECK1-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
537 // CHECK1-NEXT: [[VAR7:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
538 // CHECK1-NEXT: [[AGG_TMP8:%.*]] = alloca [[STRUCT_ST]], align 4
539 // CHECK1-NEXT: [[_TMP9:%.*]] = alloca %struct.S.0*, align 8
540 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
541 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
542 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
543 // CHECK1-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
544 // CHECK1-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
545 // CHECK1-NEXT: store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
546 // CHECK1-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
547 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
548 // CHECK1-NEXT: [[TMP1:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
549 // CHECK1-NEXT: [[TMP2:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
550 // CHECK1-NEXT: [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
551 // CHECK1-NEXT: store %struct.S.0* [[TMP3]], %struct.S.0** [[TMP]], align 8
552 // CHECK1-NEXT: [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
553 // CHECK1-NEXT: store %struct.S.0* [[TMP4]], %struct.S.0** [[_TMP1]], align 8
554 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
555 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
556 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
557 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
558 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
559 // CHECK1-NEXT: store i32 [[TMP5]], i32* [[T_VAR3]], align 4
560 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast [2 x i32]* [[VEC4]] to i8*
561 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast [2 x i32]* [[TMP1]] to i8*
562 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP6]], i8* align 4 [[TMP7]], i64 8, i1 false)
563 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR5]], i32 0, i32 0
564 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast [2 x %struct.S.0]* [[TMP2]] to %struct.S.0*
565 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
566 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN]], [[TMP9]]
567 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
568 // CHECK1: omp.arraycpy.body:
569 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
570 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
571 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP]])
572 // CHECK1-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], %struct.St* noundef [[AGG_TMP]])
573 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP]]) #[[ATTR2]]
574 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
575 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
576 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP9]]
577 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
578 // CHECK1: omp.arraycpy.done6:
579 // CHECK1-NEXT: [[TMP10:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP1]], align 8
580 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP8]])
581 // CHECK1-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR7]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP10]], %struct.St* noundef [[AGG_TMP8]])
582 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP8]]) #[[ATTR2]]
583 // CHECK1-NEXT: store %struct.S.0* [[VAR7]], %struct.S.0** [[_TMP9]], align 8
584 // CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
585 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
586 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
587 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
588 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP13]], 1
589 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
590 // CHECK1: cond.true:
591 // CHECK1-NEXT: br label [[COND_END:%.*]]
592 // CHECK1: cond.false:
593 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
594 // CHECK1-NEXT: br label [[COND_END]]
595 // CHECK1: cond.end:
596 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
597 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
598 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
599 // CHECK1-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
600 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
601 // CHECK1: omp.inner.for.cond:
602 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
603 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
604 // CHECK1-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
605 // CHECK1-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
606 // CHECK1: omp.inner.for.cond.cleanup:
607 // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
608 // CHECK1: omp.inner.for.body:
609 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
610 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
611 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
612 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
613 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[T_VAR3]], align 4
614 // CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[I]], align 4
615 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
616 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC4]], i64 0, i64 [[IDXPROM]]
617 // CHECK1-NEXT: store i32 [[TMP19]], i32* [[ARRAYIDX]], align 4
618 // CHECK1-NEXT: [[TMP21:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP9]], align 8
619 // CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4
620 // CHECK1-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP22]] to i64
621 // CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR5]], i64 0, i64 [[IDXPROM11]]
622 // CHECK1-NEXT: [[TMP23:%.*]] = bitcast %struct.S.0* [[ARRAYIDX12]] to i8*
623 // CHECK1-NEXT: [[TMP24:%.*]] = bitcast %struct.S.0* [[TMP21]] to i8*
624 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP23]], i8* align 4 [[TMP24]], i64 4, i1 false)
625 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
626 // CHECK1: omp.body.continue:
627 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
628 // CHECK1: omp.inner.for.inc:
629 // CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
630 // CHECK1-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP25]], 1
631 // CHECK1-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4
632 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
633 // CHECK1: omp.inner.for.end:
634 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
635 // CHECK1: omp.loop.exit:
636 // CHECK1-NEXT: [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
637 // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
638 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]])
639 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR7]]) #[[ATTR2]]
640 // CHECK1-NEXT: [[ARRAY_BEGIN14:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR5]], i32 0, i32 0
641 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN14]], i64 2
642 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
643 // CHECK1: arraydestroy.body:
644 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP28]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
645 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
646 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
647 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN14]]
648 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY]]
649 // CHECK1: arraydestroy.done15:
650 // CHECK1-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
651 // CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
652 // CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]])
653 // CHECK1-NEXT: ret void
654 //
655 //
656 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1ERKS0_2St
657 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
658 // CHECK1-NEXT: entry:
659 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
660 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
661 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
662 // CHECK1-NEXT: store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
663 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
664 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
665 // CHECK1-NEXT: call void @_ZN1SIiEC2ERKS0_2St(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP0]], %struct.St* noundef [[T]])
666 // CHECK1-NEXT: ret void
667 //
668 //
669 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
670 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
671 // CHECK1-NEXT: entry:
672 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
673 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
674 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
675 // CHECK1-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
676 // CHECK1-NEXT: ret void
677 //
678 //
679 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
680 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
681 // CHECK1-NEXT: entry:
682 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
683 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
684 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
685 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
686 // CHECK1-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
687 // CHECK1-NEXT: store i32 [[TMP0]], i32* [[F]], align 4
688 // CHECK1-NEXT: ret void
689 //
690 //
691 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
692 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
693 // CHECK1-NEXT: entry:
694 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
695 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
696 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
697 // CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
698 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
699 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
700 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
701 // CHECK1-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
702 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]]
703 // CHECK1-NEXT: store i32 [[ADD]], i32* [[F]], align 4
704 // CHECK1-NEXT: ret void
705 //
706 //
707 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2ERKS0_2St
708 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
709 // CHECK1-NEXT: entry:
710 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
711 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
712 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
713 // CHECK1-NEXT: store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
714 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
715 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
716 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
717 // CHECK1-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[TMP0]], i32 0, i32 0
718 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[F2]], align 4
719 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[T]], i32 0, i32 0
720 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4
721 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
722 // CHECK1-NEXT: store i32 [[ADD]], i32* [[F]], align 4
723 // CHECK1-NEXT: ret void
724 //
725 //
726 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
727 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
728 // CHECK1-NEXT: entry:
729 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
730 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
731 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
732 // CHECK1-NEXT: ret void
733 //
734 //
735 // CHECK1-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_for_firstprivate_codegen.cpp
736 // CHECK1-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
737 // CHECK1-NEXT: entry:
738 // CHECK1-NEXT: call void @__cxx_global_var_init()
739 // CHECK1-NEXT: call void @__cxx_global_var_init.1()
740 // CHECK1-NEXT: call void @__cxx_global_var_init.2()
741 // CHECK1-NEXT: ret void
742 //
743 //
744 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init
745 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" {
746 // CHECK3-NEXT: entry:
747 // CHECK3-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) @test)
748 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
749 // CHECK3-NEXT: ret void
750 //
751 //
752 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
753 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
754 // CHECK3-NEXT: entry:
755 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
756 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
757 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
758 // CHECK3-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
759 // CHECK3-NEXT: ret void
760 //
761 //
762 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
763 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
764 // CHECK3-NEXT: entry:
765 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
766 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
767 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
768 // CHECK3-NEXT: call void @_ZN1SIfED2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
769 // CHECK3-NEXT: ret void
770 //
771 //
772 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
773 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
774 // CHECK3-NEXT: entry:
775 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
776 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
777 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
778 // CHECK3-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
779 // CHECK3-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
780 // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
781 // CHECK3-NEXT: store float [[CONV]], float* [[F]], align 4
782 // CHECK3-NEXT: ret void
783 //
784 //
785 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
786 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
787 // CHECK3-NEXT: entry:
788 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
789 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
790 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
791 // CHECK3-NEXT: ret void
792 //
793 //
794 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
795 // CHECK3-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
796 // CHECK3-NEXT: entry:
797 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float noundef 1.000000e+00)
798 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float noundef 2.000000e+00)
799 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
800 // CHECK3-NEXT: ret void
801 //
802 //
803 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
804 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
805 // CHECK3-NEXT: entry:
806 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
807 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
808 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
809 // CHECK3-NEXT: store float [[A]], float* [[A_ADDR]], align 4
810 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
811 // CHECK3-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
812 // CHECK3-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]])
813 // CHECK3-NEXT: ret void
814 //
815 //
816 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
817 // CHECK3-SAME: (i8* noundef [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
818 // CHECK3-NEXT: entry:
819 // CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
820 // CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
821 // CHECK3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
822 // CHECK3: arraydestroy.body:
823 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 1, i64 0), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
824 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
825 // CHECK3-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
826 // CHECK3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
827 // CHECK3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
828 // CHECK3: arraydestroy.done1:
829 // CHECK3-NEXT: ret void
830 //
831 //
832 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
833 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
834 // CHECK3-NEXT: entry:
835 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
836 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
837 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
838 // CHECK3-NEXT: store float [[A]], float* [[A_ADDR]], align 4
839 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
840 // CHECK3-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
841 // CHECK3-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
842 // CHECK3-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
843 // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
844 // CHECK3-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
845 // CHECK3-NEXT: store float [[ADD]], float* [[F]], align 4
846 // CHECK3-NEXT: ret void
847 //
848 //
849 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
850 // CHECK3-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
851 // CHECK3-NEXT: entry:
852 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) @var, float noundef 3.000000e+00)
853 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
854 // CHECK3-NEXT: ret void
855 //
856 //
857 // CHECK3-LABEL: define {{[^@]+}}@main
858 // CHECK3-SAME: () #[[ATTR3:[0-9]+]] {
859 // CHECK3-NEXT: entry:
860 // CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
861 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
862 // CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4
863 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
864 // CHECK3-NEXT: store i32* @_ZZ4mainE5sivar, i32** [[TMP0]], align 8
865 // CHECK3-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
866 // CHECK3-NEXT: ret i32 0
867 //
868 //
869 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
870 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR5:[0-9]+]] {
871 // CHECK3-NEXT: entry:
872 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
873 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
874 // CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
875 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32*, align 8
876 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
877 // CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
878 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
879 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
880 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
881 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
882 // CHECK3-NEXT: [[G:%.*]] = alloca i32, align 4
883 // CHECK3-NEXT: [[G1:%.*]] = alloca i32, align 4
884 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca i32*, align 8
885 // CHECK3-NEXT: [[SIVAR3:%.*]] = alloca i32, align 4
886 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
887 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
888 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
889 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
890 // CHECK3-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
891 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
892 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** @g1, align 8
893 // CHECK3-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
894 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
895 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
896 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
897 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
898 // CHECK3-NEXT: [[TMP2:%.*]] = load volatile i32, i32* @g, align 4
899 // CHECK3-NEXT: store i32 [[TMP2]], i32* [[G]], align 4
900 // CHECK3-NEXT: [[TMP3:%.*]] = load volatile i32, i32* @g, align 4
901 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[G1]], align 4
902 // CHECK3-NEXT: store i32* [[G1]], i32** [[_TMP2]], align 8
903 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
904 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[SIVAR3]], align 4
905 // CHECK3-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
906 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
907 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
908 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
909 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 1
910 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
911 // CHECK3: cond.true:
912 // CHECK3-NEXT: br label [[COND_END:%.*]]
913 // CHECK3: cond.false:
914 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
915 // CHECK3-NEXT: br label [[COND_END]]
916 // CHECK3: cond.end:
917 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
918 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
919 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
920 // CHECK3-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
921 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
922 // CHECK3: omp.inner.for.cond:
923 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
924 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
925 // CHECK3-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
926 // CHECK3-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
927 // CHECK3: omp.inner.for.body:
928 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
929 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
930 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
931 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4
932 // CHECK3-NEXT: store i32 1, i32* [[G]], align 4
933 // CHECK3-NEXT: [[TMP13:%.*]] = load i32*, i32** [[_TMP2]], align 8
934 // CHECK3-NEXT: store volatile i32 2, i32* [[TMP13]], align 4
935 // CHECK3-NEXT: store i32 3, i32* [[SIVAR3]], align 4
936 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
937 // CHECK3-NEXT: store i32* [[G]], i32** [[TMP14]], align 8
938 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
939 // CHECK3-NEXT: [[TMP16:%.*]] = load i32*, i32** [[_TMP2]], align 8
940 // CHECK3-NEXT: store i32* [[TMP16]], i32** [[TMP15]], align 8
941 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
942 // CHECK3-NEXT: store i32* [[SIVAR3]], i32** [[TMP17]], align 8
943 // CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(24) [[REF_TMP]])
944 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
945 // CHECK3: omp.body.continue:
946 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
947 // CHECK3: omp.inner.for.inc:
948 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
949 // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP18]], 1
950 // CHECK3-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
951 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
952 // CHECK3: omp.inner.for.end:
953 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
954 // CHECK3: omp.loop.exit:
955 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
956 // CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]])
957 // CHECK3-NEXT: ret void
958 //
959 //
960 // CHECK3-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_for_firstprivate_codegen.cpp
961 // CHECK3-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
962 // CHECK3-NEXT: entry:
963 // CHECK3-NEXT: call void @__cxx_global_var_init()
964 // CHECK3-NEXT: call void @__cxx_global_var_init.1()
965 // CHECK3-NEXT: call void @__cxx_global_var_init.2()
966 // CHECK3-NEXT: ret void
967 //
968 //
969 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init
970 // CHECK4-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" {
971 // CHECK4-NEXT: entry:
972 // CHECK4-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) @test)
973 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
974 // CHECK4-NEXT: ret void
975 //
976 //
977 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
978 // CHECK4-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
979 // CHECK4-NEXT: entry:
980 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
981 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
982 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
983 // CHECK4-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
984 // CHECK4-NEXT: ret void
985 //
986 //
987 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
988 // CHECK4-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
989 // CHECK4-NEXT: entry:
990 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
991 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
992 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
993 // CHECK4-NEXT: call void @_ZN1SIfED2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
994 // CHECK4-NEXT: ret void
995 //
996 //
997 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
998 // CHECK4-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
999 // CHECK4-NEXT: entry:
1000 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1001 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1002 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1003 // CHECK4-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1004 // CHECK4-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
1005 // CHECK4-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
1006 // CHECK4-NEXT: store float [[CONV]], float* [[F]], align 4
1007 // CHECK4-NEXT: ret void
1008 //
1009 //
1010 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
1011 // CHECK4-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1012 // CHECK4-NEXT: entry:
1013 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1014 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1015 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1016 // CHECK4-NEXT: ret void
1017 //
1018 //
1019 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
1020 // CHECK4-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
1021 // CHECK4-NEXT: entry:
1022 // CHECK4-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float noundef 1.000000e+00)
1023 // CHECK4-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float noundef 2.000000e+00)
1024 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
1025 // CHECK4-NEXT: ret void
1026 //
1027 //
1028 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
1029 // CHECK4-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1030 // CHECK4-NEXT: entry:
1031 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1032 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
1033 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1034 // CHECK4-NEXT: store float [[A]], float* [[A_ADDR]], align 4
1035 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1036 // CHECK4-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1037 // CHECK4-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]])
1038 // CHECK4-NEXT: ret void
1039 //
1040 //
1041 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
1042 // CHECK4-SAME: (i8* noundef [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
1043 // CHECK4-NEXT: entry:
1044 // CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
1045 // CHECK4-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1046 // CHECK4-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
1047 // CHECK4: arraydestroy.body:
1048 // CHECK4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 1, i64 0), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1049 // CHECK4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1050 // CHECK4-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
1051 // CHECK4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
1052 // CHECK4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1053 // CHECK4: arraydestroy.done1:
1054 // CHECK4-NEXT: ret void
1055 //
1056 //
1057 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
1058 // CHECK4-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1059 // CHECK4-NEXT: entry:
1060 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1061 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
1062 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1063 // CHECK4-NEXT: store float [[A]], float* [[A_ADDR]], align 4
1064 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1065 // CHECK4-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1066 // CHECK4-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1067 // CHECK4-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
1068 // CHECK4-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
1069 // CHECK4-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
1070 // CHECK4-NEXT: store float [[ADD]], float* [[F]], align 4
1071 // CHECK4-NEXT: ret void
1072 //
1073 //
1074 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
1075 // CHECK4-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
1076 // CHECK4-NEXT: entry:
1077 // CHECK4-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) @var, float noundef 3.000000e+00)
1078 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
1079 // CHECK4-NEXT: ret void
1080 //
1081 //
1082 // CHECK4-LABEL: define {{[^@]+}}@main
1083 // CHECK4-SAME: () #[[ATTR3:[0-9]+]] {
1084 // CHECK4-NEXT: entry:
1085 // CHECK4-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
1086 // CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, align 8
1087 // CHECK4-NEXT: store i32 0, i32* [[RETVAL]], align 4
1088 // CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 0
1089 // CHECK4-NEXT: store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
1090 // CHECK4-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 1
1091 // CHECK4-NEXT: store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
1092 // CHECK4-NEXT: [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 2
1093 // CHECK4-NEXT: store i32 0, i32* [[BLOCK_RESERVED]], align 4
1094 // CHECK4-NEXT: [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 3
1095 // CHECK4-NEXT: store i8* bitcast (void (i8*)* @__main_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 8
1096 // CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 4
1097 // CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.3 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
1098 // CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 5
1099 // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
1100 // CHECK4-NEXT: store i32 [[TMP0]], i32* [[BLOCK_CAPTURED]], align 8
1101 // CHECK4-NEXT: [[TMP1:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]] to void ()*
1102 // CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP1]] to %struct.__block_literal_generic*
1103 // CHECK4-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
1104 // CHECK4-NEXT: [[TMP3:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
1105 // CHECK4-NEXT: [[TMP4:%.*]] = load i8*, i8** [[TMP2]], align 8
1106 // CHECK4-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to void (i8*)*
1107 // CHECK4-NEXT: call void [[TMP5]](i8* noundef [[TMP3]])
1108 // CHECK4-NEXT: ret i32 0
1109 //
1110 //
1111 // CHECK4-LABEL: define {{[^@]+}}@__main_block_invoke
1112 // CHECK4-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR1]] {
1113 // CHECK4-NEXT: entry:
1114 // CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
1115 // CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>*, align 8
1116 // CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
1117 // CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>*
1118 // CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>** [[BLOCK_ADDR]], align 8
1119 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* @_ZZ4mainE5sivar)
1120 // CHECK4-NEXT: ret void
1121 //
1122 //
1123 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
1124 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR4:[0-9]+]] {
1125 // CHECK4-NEXT: entry:
1126 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1127 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1128 // CHECK4-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
1129 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32*, align 8
1130 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1131 // CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
1132 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1133 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1134 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1135 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1136 // CHECK4-NEXT: [[G:%.*]] = alloca i32, align 4
1137 // CHECK4-NEXT: [[G1:%.*]] = alloca i32, align 4
1138 // CHECK4-NEXT: [[_TMP2:%.*]] = alloca i32*, align 8
1139 // CHECK4-NEXT: [[SIVAR3:%.*]] = alloca i32, align 4
1140 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
1141 // CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, align 8
1142 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1143 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1144 // CHECK4-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
1145 // CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
1146 // CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** @g1, align 8
1147 // CHECK4-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
1148 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
1149 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
1150 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1151 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1152 // CHECK4-NEXT: [[TMP2:%.*]] = load volatile i32, i32* @g, align 4
1153 // CHECK4-NEXT: store i32 [[TMP2]], i32* [[G]], align 4
1154 // CHECK4-NEXT: [[TMP3:%.*]] = load volatile i32, i32* @g, align 4
1155 // CHECK4-NEXT: store i32 [[TMP3]], i32* [[G1]], align 4
1156 // CHECK4-NEXT: store i32* [[G1]], i32** [[_TMP2]], align 8
1157 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1158 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[SIVAR3]], align 4
1159 // CHECK4-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1160 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
1161 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1162 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1163 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 1
1164 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1165 // CHECK4: cond.true:
1166 // CHECK4-NEXT: br label [[COND_END:%.*]]
1167 // CHECK4: cond.false:
1168 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1169 // CHECK4-NEXT: br label [[COND_END]]
1170 // CHECK4: cond.end:
1171 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
1172 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1173 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1174 // CHECK4-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
1175 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1176 // CHECK4: omp.inner.for.cond:
1177 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1178 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1179 // CHECK4-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
1180 // CHECK4-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1181 // CHECK4: omp.inner.for.body:
1182 // CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1183 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
1184 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1185 // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4
1186 // CHECK4-NEXT: store i32 1, i32* [[G]], align 4
1187 // CHECK4-NEXT: [[TMP13:%.*]] = load i32*, i32** [[_TMP2]], align 8
1188 // CHECK4-NEXT: store volatile i32 1, i32* [[TMP13]], align 4
1189 // CHECK4-NEXT: store i32 2, i32* [[SIVAR3]], align 4
1190 // CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 0
1191 // CHECK4-NEXT: store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
1192 // CHECK4-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 1
1193 // CHECK4-NEXT: store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
1194 // CHECK4-NEXT: [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 2
1195 // CHECK4-NEXT: store i32 0, i32* [[BLOCK_RESERVED]], align 4
1196 // CHECK4-NEXT: [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 3
1197 // CHECK4-NEXT: store i8* bitcast (void (i8*)* @var_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 8
1198 // CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 4
1199 // CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
1200 // CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 6
1201 // CHECK4-NEXT: [[TMP14:%.*]] = load volatile i32, i32* [[G]], align 4
1202 // CHECK4-NEXT: store volatile i32 [[TMP14]], i32* [[BLOCK_CAPTURED]], align 8
1203 // CHECK4-NEXT: [[BLOCK_CAPTURED5:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 5
1204 // CHECK4-NEXT: [[TMP15:%.*]] = load i32*, i32** [[_TMP2]], align 8
1205 // CHECK4-NEXT: store i32* [[TMP15]], i32** [[BLOCK_CAPTURED5]], align 8
1206 // CHECK4-NEXT: [[BLOCK_CAPTURED6:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 7
1207 // CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[SIVAR3]], align 4
1208 // CHECK4-NEXT: store i32 [[TMP16]], i32* [[BLOCK_CAPTURED6]], align 4
1209 // CHECK4-NEXT: [[TMP17:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]] to void ()*
1210 // CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP17]] to %struct.__block_literal_generic*
1211 // CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
1212 // CHECK4-NEXT: [[TMP19:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
1213 // CHECK4-NEXT: [[TMP20:%.*]] = load i8*, i8** [[TMP18]], align 8
1214 // CHECK4-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to void (i8*)*
1215 // CHECK4-NEXT: call void [[TMP21]](i8* noundef [[TMP19]])
1216 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1217 // CHECK4: omp.body.continue:
1218 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1219 // CHECK4: omp.inner.for.inc:
1220 // CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1221 // CHECK4-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], 1
1222 // CHECK4-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
1223 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
1224 // CHECK4: omp.inner.for.end:
1225 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1226 // CHECK4: omp.loop.exit:
1227 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
1228 // CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]])
1229 // CHECK4-NEXT: ret void
1230 //
1231 //
1232 // CHECK4-LABEL: define {{[^@]+}}@var_block_invoke
1233 // CHECK4-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR1]] {
1234 // CHECK4-NEXT: entry:
1235 // CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
1236 // CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>*, align 8
1237 // CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
1238 // CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>*
1239 // CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>** [[BLOCK_ADDR]], align 8
1240 // CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 6
1241 // CHECK4-NEXT: store i32 2, i32* [[BLOCK_CAPTURE_ADDR]], align 8
1242 // CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR1:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 5
1243 // CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[BLOCK_CAPTURE_ADDR1]], align 8
1244 // CHECK4-NEXT: store i32 2, i32* [[TMP0]], align 4
1245 // CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR2:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 7
1246 // CHECK4-NEXT: store i32 4, i32* [[BLOCK_CAPTURE_ADDR2]], align 4
1247 // CHECK4-NEXT: ret void
1248 //
1249 //
1250 // CHECK4-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_for_firstprivate_codegen.cpp
1251 // CHECK4-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
1252 // CHECK4-NEXT: entry:
1253 // CHECK4-NEXT: call void @__cxx_global_var_init()
1254 // CHECK4-NEXT: call void @__cxx_global_var_init.1()
1255 // CHECK4-NEXT: call void @__cxx_global_var_init.2()
1256 // CHECK4-NEXT: ret void
1257 //
1258