1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
3 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
4 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
5 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
6 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK4
7
8 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
9 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
10 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
11 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
12 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
13 // expected-no-diagnostics
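// This test checks the LLVM IR emitted for the OpenMP 'sections' construct with a
// 'firstprivate' clause: private copies of scalars, arrays, and class objects are
// initialized from the original variables (using copy constructors where needed),
// captures inside lambdas (-DLAMBDA) and blocks (-DBLOCKS) are handled, and 'nowait'
// elides the end-of-construct barrier. The -fopenmp-simd runs additionally verify
// that no __kmpc/__tgt runtime calls are generated.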
14 #ifndef HEADER
15 #define HEADER
16
17 struct St {
18 int a, b;
19 St() : a(0), b(0) {}
20 St(const St &st) : a(st.a + st.b), b(0) {}
21 ~St() {}
22 };
23
24 volatile int g = 1212;
25
26 template <class T>
27 struct S {
28 T f;
29 S(T a) : f(a + g) {}
30 S() : f(g) {}
31 S(const S &s, St t = St()) : f(s.f + t.a) {}
32 operator T() { return T(); }
33 ~S() {}
34 };
35
36
37 template <typename T>
38 T tmain() {
39 S<T> test;
40 T t_var = T();
41 T vec[] = {1, 2};
42 S<T> s_arr[] = {1, 2};
43 S<T> var(3);
44 #pragma omp parallel
45 #pragma omp sections firstprivate(t_var, vec, s_arr, var)
46 {
47 vec[0] = t_var;
48 #pragma omp section
49 s_arr[0] = var;
50 }
51 return T();
52 }
53
54 S<float> test;
55 int t_var = 333;
56 int vec[] = {1, 2};
57 S<float> s_arr[] = {1, 2};
58 S<float> var(3);
59
60 int main() {
61 static int sivar;
62 #ifdef LAMBDA
63 [&]() {
64 #pragma omp parallel
65 #pragma omp sections firstprivate(g, sivar)
66 {
67 // Skip temp vars for loop
68
69
70
71 {
72 g = 1;
73 sivar = 10;
74 }
75 #pragma omp section
76 [&]() {
77 g = 2;
78 sivar = 20;
79 }();
80 }
81 }();
82 return 0;
83 #elif defined(BLOCKS)
84 ^{
85 #pragma omp parallel
86 #pragma omp sections firstprivate(g, sivar)
87 {
88 // Skip temp vars for loop
89
90
91
92 {
93 g = 1;
94 sivar = 10;
95 }
96 #pragma omp section
97 ^{
98 g = 2;
99 sivar = 20;
100 }();
101 }
102 }();
103 return 0;
104 #else
105 #pragma omp sections firstprivate(t_var, vec, s_arr, var, sivar) nowait
106 {
107 {
108 vec[0] = t_var;
109 s_arr[0] = var;
110 sivar = 31;
111 }
112 }
113 return tmain<int>();
114 #endif
115 }
116
117
118 // firstprivate t_var(t_var)
119
120 // firstprivate vec(vec)
121
122 // firstprivate s_arr(s_arr)
123
124 // firstprivate var(var)
125
126 // firstprivate sivar
127
128
129 // ~(firstprivate var), ~(firstprivate s_arr)
130
131
132
133 // Skip temp vars for loop
134
135
136 // firstprivate t_var(t_var)
137
138 // firstprivate vec(vec)
139
140 // firstprivate s_arr(s_arr)
141
142 // firstprivate var(var)
143
144 // No synchronization for initialization.
145
146
147 // ~(firstprivate var), ~(firstprivate s_arr)
148 #endif
149
150 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init
151 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" {
152 // CHECK1-NEXT: entry:
153 // CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) @test)
154 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
155 // CHECK1-NEXT: ret void
156 //
157 //
158 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
159 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
160 // CHECK1-NEXT: entry:
161 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
162 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
163 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
164 // CHECK1-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
165 // CHECK1-NEXT: ret void
166 //
167 //
168 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
169 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
170 // CHECK1-NEXT: entry:
171 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
172 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
173 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
174 // CHECK1-NEXT: call void @_ZN1SIfED2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
175 // CHECK1-NEXT: ret void
176 //
177 //
178 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
179 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
180 // CHECK1-NEXT: entry:
181 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
182 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
183 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
184 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
185 // CHECK1-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
186 // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
187 // CHECK1-NEXT: store float [[CONV]], float* [[F]], align 4
188 // CHECK1-NEXT: ret void
189 //
190 //
191 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
192 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
193 // CHECK1-NEXT: entry:
194 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
195 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
196 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
197 // CHECK1-NEXT: ret void
198 //
199 //
200 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
201 // CHECK1-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
202 // CHECK1-NEXT: entry:
203 // CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float noundef 1.000000e+00)
204 // CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float noundef 2.000000e+00)
205 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
206 // CHECK1-NEXT: ret void
207 //
208 //
209 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
210 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
211 // CHECK1-NEXT: entry:
212 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
213 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
214 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
215 // CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4
216 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
217 // CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
218 // CHECK1-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]])
219 // CHECK1-NEXT: ret void
220 //
221 //
222 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
223 // CHECK1-SAME: (i8* noundef [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
224 // CHECK1-NEXT: entry:
225 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
226 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
227 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
228 // CHECK1: arraydestroy.body:
229 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 1, i64 0), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
230 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
231 // CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
232 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
233 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
234 // CHECK1: arraydestroy.done1:
235 // CHECK1-NEXT: ret void
236 //
237 //
238 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
239 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
240 // CHECK1-NEXT: entry:
241 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
242 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
243 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
244 // CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4
245 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
246 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
247 // CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
248 // CHECK1-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
249 // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
250 // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
251 // CHECK1-NEXT: store float [[ADD]], float* [[F]], align 4
252 // CHECK1-NEXT: ret void
253 //
254 //
255 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
256 // CHECK1-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
257 // CHECK1-NEXT: entry:
258 // CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) @var, float noundef 3.000000e+00)
259 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
260 // CHECK1-NEXT: ret void
261 //
262 //
263 // CHECK1-LABEL: define {{[^@]+}}@main
264 // CHECK1-SAME: () #[[ATTR3:[0-9]+]] {
265 // CHECK1-NEXT: entry:
266 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
267 // CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
268 // CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
269 // CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
270 // CHECK1-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
271 // CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
272 // CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
273 // CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
274 // CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
275 // CHECK1-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
276 // CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S:%.*]], align 4
277 // CHECK1-NEXT: [[AGG_TMP2:%.*]] = alloca [[STRUCT_ST]], align 4
278 // CHECK1-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
279 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
280 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
281 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
282 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_UB_]], align 4
283 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
284 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
285 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* @t_var, align 4
286 // CHECK1-NEXT: store i32 [[TMP1]], i32* [[T_VAR]], align 4
287 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
288 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 bitcast ([2 x i32]* @vec to i8*), i64 8, i1 false)
289 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
290 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
291 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP3]]
292 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
293 // CHECK1: omp.arraycpy.body:
294 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
295 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
296 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP]])
297 // CHECK1-NEXT: call void @_ZN1SIfEC1ERKS0_2St(%struct.S* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], %struct.St* noundef [[AGG_TMP]])
298 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP]]) #[[ATTR2]]
299 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
300 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
301 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP3]]
302 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
303 // CHECK1: omp.arraycpy.done1:
304 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP2]])
305 // CHECK1-NEXT: call void @_ZN1SIfEC1ERKS0_2St(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]], %struct.S* noundef nonnull align 4 dereferenceable(4) @var, %struct.St* noundef [[AGG_TMP2]])
306 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP2]]) #[[ATTR2]]
307 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
308 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[SIVAR]], align 4
309 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
310 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
311 // CHECK1-NEXT: [[TMP6:%.*]] = icmp slt i32 [[TMP5]], 0
312 // CHECK1-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i32 [[TMP5]], i32 0
313 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
314 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
315 // CHECK1-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
316 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
317 // CHECK1: omp.inner.for.cond:
318 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
319 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
320 // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
321 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
322 // CHECK1: omp.inner.for.body:
323 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
324 // CHECK1-NEXT: switch i32 [[TMP11]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
325 // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
326 // CHECK1-NEXT: ]
327 // CHECK1: .omp.sections.case:
328 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[T_VAR]], align 4
329 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 0
330 // CHECK1-NEXT: store i32 [[TMP12]], i32* [[ARRAYIDX]], align 4
331 // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
332 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast %struct.S* [[ARRAYIDX3]] to i8*
333 // CHECK1-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[VAR]] to i8*
334 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP13]], i8* align 4 [[TMP14]], i64 4, i1 false)
335 // CHECK1-NEXT: store i32 31, i32* [[SIVAR]], align 4
336 // CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
337 // CHECK1: .omp.sections.exit:
338 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
339 // CHECK1: omp.inner.for.inc:
340 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
341 // CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP15]], 1
342 // CHECK1-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
343 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
344 // CHECK1: omp.inner.for.end:
345 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
346 // CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
347 // CHECK1-NEXT: [[ARRAY_BEGIN4:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
348 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN4]], i64 2
349 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
350 // CHECK1: arraydestroy.body:
351 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP16]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
352 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
353 // CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
354 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN4]]
355 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE5:%.*]], label [[ARRAYDESTROY_BODY]]
356 // CHECK1: arraydestroy.done5:
357 // CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
358 // CHECK1-NEXT: ret i32 [[CALL]]
359 //
360 //
361 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StC1Ev
362 // CHECK1-SAME: (%struct.St* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
363 // CHECK1-NEXT: entry:
364 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
365 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
366 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
367 // CHECK1-NEXT: call void @_ZN2StC2Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[THIS1]])
368 // CHECK1-NEXT: ret void
369 //
370 //
371 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1ERKS0_2St
372 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
373 // CHECK1-NEXT: entry:
374 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
375 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S*, align 8
376 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
377 // CHECK1-NEXT: store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
378 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
379 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
380 // CHECK1-NEXT: call void @_ZN1SIfEC2ERKS0_2St(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[TMP0]], %struct.St* noundef [[T]])
381 // CHECK1-NEXT: ret void
382 //
383 //
384 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StD1Ev
385 // CHECK1-SAME: (%struct.St* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
386 // CHECK1-NEXT: entry:
387 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
388 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
389 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
390 // CHECK1-NEXT: call void @_ZN2StD2Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR2]]
391 // CHECK1-NEXT: ret void
392 //
393 //
394 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
395 // CHECK1-SAME: () #[[ATTR5:[0-9]+]] {
396 // CHECK1-NEXT: entry:
397 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
398 // CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
399 // CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
400 // CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
401 // CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
402 // CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4
403 // CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]])
404 // CHECK1-NEXT: store i32 0, i32* [[T_VAR]], align 4
405 // CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
406 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
407 // CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
408 // CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef 1)
409 // CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
410 // CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
411 // CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]], i32 noundef 3)
412 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, [2 x i32]*, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[T_VAR]], [2 x i32]* [[VEC]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[VAR]])
413 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
414 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
415 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
416 // CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
417 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
418 // CHECK1: arraydestroy.body:
419 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
420 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
421 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
422 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
423 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
424 // CHECK1: arraydestroy.done1:
425 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR2]]
426 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
427 // CHECK1-NEXT: ret i32 [[TMP2]]
428 //
429 //
430 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StC2Ev
431 // CHECK1-SAME: (%struct.St* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
432 // CHECK1-NEXT: entry:
433 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
434 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
435 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
436 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[THIS1]], i32 0, i32 0
437 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4
438 // CHECK1-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[THIS1]], i32 0, i32 1
439 // CHECK1-NEXT: store i32 0, i32* [[B]], align 4
440 // CHECK1-NEXT: ret void
441 //
442 //
443 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2ERKS0_2St
444 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
445 // CHECK1-NEXT: entry:
446 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
447 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S*, align 8
448 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
449 // CHECK1-NEXT: store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
450 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
451 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
452 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
453 // CHECK1-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 0
454 // CHECK1-NEXT: [[TMP1:%.*]] = load float, float* [[F2]], align 4
455 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[T]], i32 0, i32 0
456 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4
457 // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to float
458 // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP1]], [[CONV]]
459 // CHECK1-NEXT: store float [[ADD]], float* [[F]], align 4
460 // CHECK1-NEXT: ret void
461 //
462 //
463 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StD2Ev
464 // CHECK1-SAME: (%struct.St* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
465 // CHECK1-NEXT: entry:
466 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
467 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
468 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
469 // CHECK1-NEXT: ret void
470 //
471 //
472 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
473 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
474 // CHECK1-NEXT: entry:
475 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
476 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
477 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
478 // CHECK1-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
479 // CHECK1-NEXT: ret void
480 //
481 //
482 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
483 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
484 // CHECK1-NEXT: entry:
485 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
486 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
487 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
488 // CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
489 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
490 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
491 // CHECK1-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]])
492 // CHECK1-NEXT: ret void
493 //
494 //
495 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
496 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x i32]* noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S.0]* noundef nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR6:[0-9]+]] {
497 // CHECK1-NEXT: entry:
498 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
499 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
500 // CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
501 // CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
502 // CHECK1-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
503 // CHECK1-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
504 // CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
505 // CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
506 // CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
507 // CHECK1-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
508 // CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
509 // CHECK1-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
510 // CHECK1-NEXT: [[VEC2:%.*]] = alloca [2 x i32], align 4
511 // CHECK1-NEXT: [[S_ARR3:%.*]] = alloca [2 x %struct.S.0], align 4
512 // CHECK1-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
513 // CHECK1-NEXT: [[VAR5:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
514 // CHECK1-NEXT: [[AGG_TMP6:%.*]] = alloca [[STRUCT_ST]], align 4
515 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
516 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
517 // CHECK1-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
518 // CHECK1-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
519 // CHECK1-NEXT: store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
520 // CHECK1-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
521 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
522 // CHECK1-NEXT: [[TMP1:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
523 // CHECK1-NEXT: [[TMP2:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
524 // CHECK1-NEXT: [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
525 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
526 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
527 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
528 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
529 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
530 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[T_VAR1]], align 4
531 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast [2 x i32]* [[VEC2]] to i8*
532 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast [2 x i32]* [[TMP1]] to i8*
533 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP5]], i8* align 4 [[TMP6]], i64 8, i1 false)
534 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR3]], i32 0, i32 0
535 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast [2 x %struct.S.0]* [[TMP2]] to %struct.S.0*
536 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
537 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN]], [[TMP8]]
538 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE4:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
539 // CHECK1: omp.arraycpy.body:
540 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP7]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
541 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
542 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP]])
543 // CHECK1-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], %struct.St* noundef [[AGG_TMP]])
544 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP]]) #[[ATTR2]]
545 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
546 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
547 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]]
548 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE4]], label [[OMP_ARRAYCPY_BODY]]
549 // CHECK1: omp.arraycpy.done4:
550 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP6]])
551 // CHECK1-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR5]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP3]], %struct.St* noundef [[AGG_TMP6]])
552 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef nonnull align 4 dereferenceable(8) [[AGG_TMP6]]) #[[ATTR2]]
553 // CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
554 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
555 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
556 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
557 // CHECK1-NEXT: [[TMP12:%.*]] = icmp slt i32 [[TMP11]], 1
558 // CHECK1-NEXT: [[TMP13:%.*]] = select i1 [[TMP12]], i32 [[TMP11]], i32 1
559 // CHECK1-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
560 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
561 // CHECK1-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
562 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
563 // CHECK1: omp.inner.for.cond:
564 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
565 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
566 // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
567 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
568 // CHECK1: omp.inner.for.body:
569 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
570 // CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
571 // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
572 // CHECK1-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE7:%.*]]
573 // CHECK1-NEXT: ]
574 // CHECK1: .omp.sections.case:
575 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[T_VAR1]], align 4
576 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC2]], i64 0, i64 0
577 // CHECK1-NEXT: store i32 [[TMP18]], i32* [[ARRAYIDX]], align 4
578 // CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
579 // CHECK1: .omp.sections.case7:
580 // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR3]], i64 0, i64 0
581 // CHECK1-NEXT: [[TMP19:%.*]] = bitcast %struct.S.0* [[ARRAYIDX8]] to i8*
582 // CHECK1-NEXT: [[TMP20:%.*]] = bitcast %struct.S.0* [[VAR5]] to i8*
583 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP19]], i8* align 4 [[TMP20]], i64 4, i1 false)
584 // CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
585 // CHECK1: .omp.sections.exit:
586 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
587 // CHECK1: omp.inner.for.inc:
588 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
589 // CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP21]], 1
590 // CHECK1-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
591 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
592 // CHECK1: omp.inner.for.end:
593 // CHECK1-NEXT: [[TMP22:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
594 // CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4
595 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP23]])
596 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR2]]
597 // CHECK1-NEXT: [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR3]], i32 0, i32 0
598 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN9]], i64 2
599 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
600 // CHECK1: arraydestroy.body:
601 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP24]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
602 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
603 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
604 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN9]]
605 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE10:%.*]], label [[ARRAYDESTROY_BODY]]
606 // CHECK1: arraydestroy.done10:
607 // CHECK1-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
608 // CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
609 // CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP26]])
610 // CHECK1-NEXT: ret void
611 //
612 //
613 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1ERKS0_2St
614 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
615 // CHECK1-NEXT: entry:
616 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
617 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
618 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
619 // CHECK1-NEXT: store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
620 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
621 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
622 // CHECK1-NEXT: call void @_ZN1SIiEC2ERKS0_2St(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP0]], %struct.St* noundef [[T]])
623 // CHECK1-NEXT: ret void
624 //
625 //
626 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
627 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
628 // CHECK1-NEXT: entry:
629 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
630 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
631 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
632 // CHECK1-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
633 // CHECK1-NEXT: ret void
634 //
635 //
636 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
637 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
638 // CHECK1-NEXT: entry:
639 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
640 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
641 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
642 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
643 // CHECK1-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
644 // CHECK1-NEXT: store i32 [[TMP0]], i32* [[F]], align 4
645 // CHECK1-NEXT: ret void
646 //
647 //
648 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
649 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
650 // CHECK1-NEXT: entry:
651 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
652 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
653 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
654 // CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
655 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
656 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
657 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
658 // CHECK1-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
659 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]]
660 // CHECK1-NEXT: store i32 [[ADD]], i32* [[F]], align 4
661 // CHECK1-NEXT: ret void
662 //
663 //
664 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2ERKS0_2St
665 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
666 // CHECK1-NEXT: entry:
667 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
668 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
669 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
670 // CHECK1-NEXT: store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
671 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
672 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
673 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
674 // CHECK1-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[TMP0]], i32 0, i32 0
675 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[F2]], align 4
676 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[T]], i32 0, i32 0
677 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4
678 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
679 // CHECK1-NEXT: store i32 [[ADD]], i32* [[F]], align 4
680 // CHECK1-NEXT: ret void
681 //
682 //
683 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
684 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
685 // CHECK1-NEXT: entry:
686 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
687 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
688 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
689 // CHECK1-NEXT: ret void
690 //
691 //
692 // CHECK1-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_sections_firstprivate_codegen.cpp
693 // CHECK1-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
694 // CHECK1-NEXT: entry:
695 // CHECK1-NEXT: call void @__cxx_global_var_init()
696 // CHECK1-NEXT: call void @__cxx_global_var_init.1()
697 // CHECK1-NEXT: call void @__cxx_global_var_init.2()
698 // CHECK1-NEXT: ret void
699 //
700 //
701 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init
702 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" {
703 // CHECK3-NEXT: entry:
704 // CHECK3-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) @test)
705 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
706 // CHECK3-NEXT: ret void
707 //
708 //
709 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
710 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
711 // CHECK3-NEXT: entry:
712 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
713 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
714 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
715 // CHECK3-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
716 // CHECK3-NEXT: ret void
717 //
718 //
719 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
720 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
721 // CHECK3-NEXT: entry:
722 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
723 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
724 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
725 // CHECK3-NEXT: call void @_ZN1SIfED2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
726 // CHECK3-NEXT: ret void
727 //
728 //
729 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
730 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
731 // CHECK3-NEXT: entry:
732 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
733 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
734 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
735 // CHECK3-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
736 // CHECK3-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
737 // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
738 // CHECK3-NEXT: store float [[CONV]], float* [[F]], align 4
739 // CHECK3-NEXT: ret void
740 //
741 //
742 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
743 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
744 // CHECK3-NEXT: entry:
745 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
746 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
747 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
748 // CHECK3-NEXT: ret void
749 //
750 //
751 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
752 // CHECK3-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
753 // CHECK3-NEXT: entry:
754 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float noundef 1.000000e+00)
755 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float noundef 2.000000e+00)
756 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
757 // CHECK3-NEXT: ret void
758 //
759 //
760 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
761 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
762 // CHECK3-NEXT: entry:
763 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
764 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
765 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
766 // CHECK3-NEXT: store float [[A]], float* [[A_ADDR]], align 4
767 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
768 // CHECK3-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
769 // CHECK3-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]])
770 // CHECK3-NEXT: ret void
771 //
772 //
773 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
774 // CHECK3-SAME: (i8* noundef [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
775 // CHECK3-NEXT: entry:
776 // CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
777 // CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
778 // CHECK3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
779 // CHECK3: arraydestroy.body:
780 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 1, i64 0), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
781 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
782 // CHECK3-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
783 // CHECK3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
784 // CHECK3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
785 // CHECK3: arraydestroy.done1:
786 // CHECK3-NEXT: ret void
787 //
788 //
789 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
790 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
791 // CHECK3-NEXT: entry:
792 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
793 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
794 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
795 // CHECK3-NEXT: store float [[A]], float* [[A_ADDR]], align 4
796 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
797 // CHECK3-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
798 // CHECK3-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
799 // CHECK3-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
800 // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
801 // CHECK3-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
802 // CHECK3-NEXT: store float [[ADD]], float* [[F]], align 4
803 // CHECK3-NEXT: ret void
804 //
805 //
806 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
807 // CHECK3-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
808 // CHECK3-NEXT: entry:
809 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) @var, float noundef 3.000000e+00)
810 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
811 // CHECK3-NEXT: ret void
812 //
813 //
814 // CHECK3-LABEL: define {{[^@]+}}@main
815 // CHECK3-SAME: () #[[ATTR3:[0-9]+]] {
816 // CHECK3-NEXT: entry:
817 // CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
818 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
819 // CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4
820 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
821 // CHECK3-NEXT: store i32* @_ZZ4mainE5sivar, i32** [[TMP0]], align 8
822 // CHECK3-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
823 // CHECK3-NEXT: ret i32 0
824 //
825 //
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[G:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[G]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[SIVAR1]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = icmp slt i32 [[TMP5]], 1
// CHECK3-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i32 [[TMP5]], i32 1
// CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: switch i32 [[TMP11]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK3-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK3-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE2:%.*]]
// CHECK3-NEXT: ]
// CHECK3: .omp.sections.case:
// CHECK3-NEXT: store i32 1, i32* [[G]], align 4
// CHECK3-NEXT: store i32 10, i32* [[SIVAR1]], align 4
// CHECK3-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK3: .omp.sections.case2:
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT: store i32* [[G]], i32** [[TMP12]], align 8
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
// CHECK3-NEXT: store i32* [[SIVAR1]], i32** [[TMP13]], align 8
// CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(16) [[REF_TMP]])
// CHECK3-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK3: .omp.sections.exit:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK3-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_sections_firstprivate_codegen.cpp
// CHECK3-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: call void @__cxx_global_var_init()
// CHECK3-NEXT: call void @__cxx_global_var_init.1()
// CHECK3-NEXT: call void @__cxx_global_var_init.2()
// CHECK3-NEXT: ret void
//
//
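// The CHECK4 lines that follow cover the blocks variant of the same test: the dynamic
// initializers for @test, @s_arr and @var match the pattern checked for the other
// prefixes, and main builds an on-stack block literal instead of a lambda.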
// CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init
// CHECK4-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) @test)
// CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
// CHECK4-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
// CHECK4-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: call void @_ZN1SIfED2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
// CHECK4-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
// CHECK4-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
// CHECK4-NEXT: store float [[CONV]], float* [[F]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
// CHECK4-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
// CHECK4-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float noundef 1.000000e+00)
// CHECK4-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float noundef 2.000000e+00)
// CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
// CHECK4-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
// CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: store float [[A]], float* [[A_ADDR]], align 4
// CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
// CHECK4-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]])
// CHECK4-NEXT: ret void
//
//
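// __cxx_global_array_dtor tears down the two @s_arr elements in reverse order: the
// phi starts one past the end of the array, and each iteration steps back one element
// and calls ~S<float> until element 0 has been destroyed.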
// CHECK4-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
// CHECK4-SAME: (i8* noundef [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK4-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK4: arraydestroy.body:
// CHECK4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 1, i64 0), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK4-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
// CHECK4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
// CHECK4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK4: arraydestroy.done1:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
// CHECK4-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
// CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: store float [[A]], float* [[A_ADDR]], align 4
// CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
// CHECK4-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
// CHECK4-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
// CHECK4-NEXT: store float [[ADD]], float* [[F]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
// CHECK4-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) @var, float noundef 3.000000e+00)
// CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
// CHECK4-NEXT: ret void
//
//
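// In the blocks build, main materializes an on-stack block literal that captures
// sivar by value and then calls it through the generic block-literal invoke pointer.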
// CHECK4-LABEL: define {{[^@]+}}@main
// CHECK4-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, align 8
// CHECK4-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 0
// CHECK4-NEXT: store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
// CHECK4-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 1
// CHECK4-NEXT: store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
// CHECK4-NEXT: [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 2
// CHECK4-NEXT: store i32 0, i32* [[BLOCK_RESERVED]], align 4
// CHECK4-NEXT: [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 3
// CHECK4-NEXT: store i8* bitcast (void (i8*)* @__main_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 8
// CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 4
// CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.3 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP1]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP3:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP4:%.*]] = load i8*, i8** [[TMP2]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP5]](i8* noundef [[TMP3]])
// CHECK4-NEXT: ret i32 0
//
//
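// The block body itself only forks the parallel region: __kmpc_fork_call receives the
// outlined function together with a pointer to the static local @_ZZ4mainE5sivar.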
// CHECK4-LABEL: define {{[^@]+}}@__main_block_invoke
// CHECK4-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>*, align 8
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>*
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* @_ZZ4mainE5sivar)
// CHECK4-NEXT: ret void
//
//
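// The CHECK4 outlined function repeats the firstprivate pattern verified for CHECK3:
// [[G]] and [[SIVAR1]] are initialized from @g and the captured sivar, and the second
// section builds a nested block literal that captures those private copies by value
// before invoking it.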
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[G:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[G]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[SIVAR1]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = icmp slt i32 [[TMP5]], 1
// CHECK4-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i32 [[TMP5]], i32 1
// CHECK4-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK4-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK4-NEXT: switch i32 [[TMP11]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK4-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK4-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE2:%.*]]
// CHECK4-NEXT: ]
// CHECK4: .omp.sections.case:
// CHECK4-NEXT: store i32 1, i32* [[G]], align 4
// CHECK4-NEXT: store i32 10, i32* [[SIVAR1]], align 4
// CHECK4-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK4: .omp.sections.case2:
// CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 0
// CHECK4-NEXT: store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
// CHECK4-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 1
// CHECK4-NEXT: store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
// CHECK4-NEXT: [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 2
// CHECK4-NEXT: store i32 0, i32* [[BLOCK_RESERVED]], align 4
// CHECK4-NEXT: [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 3
// CHECK4-NEXT: store i8* bitcast (void (i8*)* @var_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 8
// CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 4
// CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: [[TMP12:%.*]] = load volatile i32, i32* [[G]], align 4
// CHECK4-NEXT: store volatile i32 [[TMP12]], i32* [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED3:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 6
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK4-NEXT: store i32 [[TMP13]], i32* [[BLOCK_CAPTURED3]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP14]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP16:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP17:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8* [[TMP17]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP18]](i8* noundef [[TMP16]])
// CHECK4-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK4: .omp.sections.exit:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK4-NEXT: [[INC:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK4-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]])
// CHECK4-NEXT: ret void
//
//
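// var_block_invoke is the nested block's body: it stores 2 and 20 into the block's
// captured copies of g and sivar rather than into the enclosing private variables.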
// CHECK4-LABEL: define {{[^@]+}}@var_block_invoke
// CHECK4-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>*, align 8
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>*
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: store i32 2, i32* [[BLOCK_CAPTURE_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR1:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 6
// CHECK4-NEXT: store i32 20, i32* [[BLOCK_CAPTURE_ADDR1]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_sections_firstprivate_codegen.cpp
// CHECK4-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: call void @__cxx_global_var_init()
// CHECK4-NEXT: call void @__cxx_global_var_init.1()
// CHECK4-NEXT: call void @__cxx_global_var_init.2()
// CHECK4-NEXT: ret void
//