// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK3
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK4

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

volatile double g;

template <class T>
struct S {
  T f;
  S(T a) : f(a + g) {}
  S() : f(g) {}
  operator T() { return T(); }
  S &operator&(const S &) { return *this; }
  ~S() {}
};
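// Note: in this template, operator& ignores its argument and returns *this,
// and operator T() always returns T() (zero). That is why the generated IR
// below copies the reduction result with memcpy after calling the operator&
// method, and why the conversion operator simply returns 0 (see the CHECK
// lines for _ZN1SIfEanERKS0_ and _ZN1SIfEcvfEv).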


template <typename T>
T tmain() {
  T t;
  S<T> test;
  T t_var = T(), t_var1;
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var(3), var1;
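  // Four reduction clauses (+, &, &&, min) plus nowait: the outlined function
  // for this construct is expected to use __kmpc_reduce_nowait and to skip
  // the trailing barrier (see @.omp_outlined..1 in the CHECK lines below).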
#pragma omp parallel
#pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1) nowait
  {
    vec[0] = t_var;
#pragma omp section
    s_arr[0] = var;
  }
  return T();
}

int main() {
#ifdef LAMBDA
  [&]() {
#pragma omp parallel
#pragma omp sections reduction(+:g)
    {

      // Reduction list for runtime.

      g = 1;

#pragma omp section
      [&]() {
        g = 2;
      }();
    }
  }();
  return 0;
#elif defined(BLOCKS)
  ^{
#pragma omp parallel
#pragma omp sections reduction(-:g)
    {

      // Reduction list for runtime.

      g = 1;

#pragma omp section
      ^{
        g = 2;
      }();
    }
  }();
  return 0;
#else
  S<float> test;
  float t_var = 0, t_var1;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3), var1;
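  // Same reduction clauses as in tmain, but without nowait: the outlined
  // function is expected to use __kmpc_reduce/__kmpc_end_reduce and end with
  // an explicit __kmpc_barrier (see @.omp_outlined. in the CHECK lines below).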
#pragma omp parallel
#pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
  {
    {
      vec[0] = t_var;
      s_arr[0] = var;
      vec[1] = t_var1;
      s_arr[1] = var1;
    }
  }
  return tmain<int>();
#endif
}










// Reduction list for runtime.



// For the + reduction operation, the initial value of the private variable is 0.

// For the & reduction operation, the initial value of the private variable is all-ones bits.

// For the && reduction operation, the initial value of the private variable is 1.0.

// For the min reduction operation, the initial value of the private variable is the largest representable value.

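// A sketch of the corresponding private-copy setup in this test (assuming the
// <float> instantiation; compare the stores and constructor calls in the
// CHECK lines below):
//   float t_var_priv = 0.0f;      // +   : additive identity
//   S<float> var_priv;            // &   : class type, default-constructed
//   S<float> var1_priv;           // &&  : class type, default-constructed
//   float t_var1_priv = FLT_MAX;  // min : 0x47EFFFFFE0000000 in the IR
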
// Skip checks for internal operations.

// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};


// res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);
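
// For reference, the runtime entry point has roughly this shape (a sketch
// based on kmp.h, not part of the checked IR):
//   kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32 gtid,
//                                  kmp_int32 num_vars, size_t reduce_size,
//                                  void *reduce_data,
//                                  void (*reduce_func)(void *lhs, void *rhs),
//                                  kmp_critical_name *lck);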


// switch(res)

// case 1:
// t_var += t_var_reduction;

// var = var.operator &(var_reduction);

// var1 = var1.operator &&(var1_reduction);

// t_var1 = min(t_var1, t_var1_reduction);

// __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);

// break;

// case 2:
// t_var += t_var_reduction;

// var = var.operator &(var_reduction);

// var1 = var1.operator &&(var1_reduction);

// t_var1 = min(t_var1, t_var1_reduction);

// break;
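
// Case 1 means the runtime told this thread to combine all private copies
// itself while holding <lock>; case 2 is the atomic fallback, where each
// thread merges its own private copy into the original variable with atomic
// operations, or inside a critical section when no atomic form exists.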

// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
// *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
// ...
// *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
// *(Type<n>-1*)rhs[<n>-1]);
// }
// t_var_lhs = (i{{[0-9]+}}*)lhs[0];
// t_var_rhs = (i{{[0-9]+}}*)rhs[0];

// var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
// var_rhs = (S<i{{[0-9]+}}>*)rhs[1];

// var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
// var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];

// t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
// t_var1_rhs = (i{{[0-9]+}}*)rhs[3];

// t_var_lhs += t_var_rhs;

// var_lhs = var_lhs.operator &(var_rhs);

// var1_lhs = var1_lhs.operator &&(var1_rhs);

// t_var1_lhs = min(t_var1_lhs, t_var1_rhs);

#endif
// CHECK1-LABEL: define {{[^@]+}}@main
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[T_VAR1:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
// CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK1-NEXT: [[VAR1:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK1-NEXT: store float 0.000000e+00, float* [[T_VAR]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
// CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float noundef 1.000000e+00)
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float noundef 2.000000e+00)
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]], float noundef 3.000000e+00)
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR1]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float*, %struct.S*, %struct.S*, float*, [2 x i32]*, [2 x %struct.S]*)* @.omp_outlined. to void (i32*, i32*, ...)*), float* [[T_VAR]], %struct.S* [[VAR]], %struct.S* [[VAR1]], float* [[T_VAR1]], [2 x i32]* [[VEC]], [2 x %struct.S]* [[S_ARR]])
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
// CHECK1-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4:[0-9]+]]
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done1:
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP2]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
// CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
// CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
// CHECK1-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR1:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]], [2 x i32]* noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S]* noundef nonnull align 4 dereferenceable(8) [[S_ARR:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[VAR1_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[T_VAR1_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
// CHECK1-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
// CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[T_VAR2:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[VAR3:%.*]] = alloca [[STRUCT_S:%.*]], align 4
// CHECK1-NEXT: [[VAR14:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK1-NEXT: [[T_VAR15:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK1-NEXT: [[REF_TMP16:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca float, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store float* [[T_VAR]], float** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
// CHECK1-NEXT: store %struct.S* [[VAR1]], %struct.S** [[VAR1_ADDR]], align 8
// CHECK1-NEXT: store float* [[T_VAR1]], float** [[T_VAR1_ADDR]], align 8
// CHECK1-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
// CHECK1-NEXT: store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load float*, float** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR1_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load float*, float** [[T_VAR1_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK1-NEXT: store float 0.000000e+00, float* [[T_VAR2]], align 4
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK1-NEXT: store float 0x47EFFFFFE0000000, float* [[T_VAR15]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = icmp slt i32 [[TMP8]], 0
// CHECK1-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 0
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: switch i32 [[TMP14]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.sections.case:
// CHECK1-NEXT: [[TMP15:%.*]] = load float, float* [[T_VAR2]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = fptosi float [[TMP15]] to i32
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 0
// CHECK1-NEXT: store i32 [[CONV]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast %struct.S* [[ARRAYIDX6]] to i8*
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP16]], i8* align 4 [[TMP17]], i64 4, i1 false)
// CHECK1-NEXT: [[TMP18:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK1-NEXT: [[CONV7:%.*]] = fptosi float [[TMP18]] to i32
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 1
// CHECK1-NEXT: store i32 [[CONV7]], i32* [[ARRAYIDX8]], align 4
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP5]], i64 0, i64 1
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast %struct.S* [[ARRAYIDX9]] to i8*
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast %struct.S* [[VAR14]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP19]], i8* align 4 [[TMP20]], i64 4, i1 false)
// CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK1: .omp.sections.exit:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]])
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast float* [[T_VAR2]] to i8*
// CHECK1-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP25:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
// CHECK1-NEXT: store i8* [[TMP25]], i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast %struct.S* [[VAR14]] to i8*
// CHECK1-NEXT: store i8* [[TMP27]], i8** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast float* [[T_VAR15]] to i8*
// CHECK1-NEXT: store i8* [[TMP29]], i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP31:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 4, i64 32, i8* [[TMP30]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP31]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP32:%.*]] = load float, float* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP33:%.*]] = load float, float* [[T_VAR2]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP32]], [[TMP33]]
// CHECK1-NEXT: store float [[ADD]], float* [[TMP0]], align 4
// CHECK1-NEXT: [[CALL:%.*]] = call noundef nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast %struct.S* [[TMP1]] to i8*
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast %struct.S* [[CALL]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP34]], i8* align 4 [[TMP35]], i64 4, i1 false)
// CHECK1-NEXT: [[CALL10:%.*]] = call noundef float @_ZN1SIfEcvfEv(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TMP2]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = fcmp une float [[CALL10]], 0.000000e+00
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK1: land.rhs:
// CHECK1-NEXT: [[CALL11:%.*]] = call noundef float @_ZN1SIfEcvfEv(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK1-NEXT: [[TOBOOL12:%.*]] = fcmp une float [[CALL11]], 0.000000e+00
// CHECK1-NEXT: br label [[LAND_END]]
// CHECK1: land.end:
// CHECK1-NEXT: [[TMP36:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL12]], [[LAND_RHS]] ]
// CHECK1-NEXT: [[CONV13:%.*]] = uitofp i1 [[TMP36]] to float
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[REF_TMP]], float noundef [[CONV13]])
// CHECK1-NEXT: [[TMP37:%.*]] = bitcast %struct.S* [[TMP2]] to i8*
// CHECK1-NEXT: [[TMP38:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP37]], i8* align 4 [[TMP38]], i64 4, i1 false)
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP39:%.*]] = load float, float* [[TMP3]], align 4
// CHECK1-NEXT: [[TMP40:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK1-NEXT: [[CMP14:%.*]] = fcmp olt float [[TMP39]], [[TMP40]]
// CHECK1-NEXT: br i1 [[CMP14]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP41:%.*]] = load float, float* [[TMP3]], align 4
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP42:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi float [ [[TMP41]], [[COND_TRUE]] ], [ [[TMP42]], [[COND_FALSE]] ]
// CHECK1-NEXT: store float [[COND]], float* [[TMP3]], align 4
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP43:%.*]] = load float, float* [[T_VAR2]], align 4
// CHECK1-NEXT: [[TMP44:%.*]] = atomicrmw fadd float* [[TMP0]], float [[TMP43]] monotonic, align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: [[CALL15:%.*]] = call noundef nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK1-NEXT: [[TMP45:%.*]] = bitcast %struct.S* [[TMP1]] to i8*
// CHECK1-NEXT: [[TMP46:%.*]] = bitcast %struct.S* [[CALL15]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP45]], i8* align 4 [[TMP46]], i64 4, i1 false)
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: [[CALL17:%.*]] = call noundef float @_ZN1SIfEcvfEv(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TMP2]])
// CHECK1-NEXT: [[TOBOOL18:%.*]] = fcmp une float [[CALL17]], 0.000000e+00
// CHECK1-NEXT: br i1 [[TOBOOL18]], label [[LAND_RHS19:%.*]], label [[LAND_END22:%.*]]
// CHECK1: land.rhs19:
// CHECK1-NEXT: [[CALL20:%.*]] = call noundef float @_ZN1SIfEcvfEv(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK1-NEXT: [[TOBOOL21:%.*]] = fcmp une float [[CALL20]], 0.000000e+00
// CHECK1-NEXT: br label [[LAND_END22]]
// CHECK1: land.end22:
// CHECK1-NEXT: [[TMP47:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE2]] ], [ [[TOBOOL21]], [[LAND_RHS19]] ]
// CHECK1-NEXT: [[CONV23:%.*]] = uitofp i1 [[TMP47]] to float
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[REF_TMP16]], float noundef [[CONV23]])
// CHECK1-NEXT: [[TMP48:%.*]] = bitcast %struct.S* [[TMP2]] to i8*
// CHECK1-NEXT: [[TMP49:%.*]] = bitcast %struct.S* [[REF_TMP16]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP48]], i8* align 4 [[TMP49]], i64 4, i1 false)
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[REF_TMP16]]) #[[ATTR4]]
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: [[TMP50:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK1-NEXT: [[TMP51:%.*]] = bitcast float* [[TMP3]] to i32*
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i32, i32* [[TMP51]] monotonic, align 4
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[TMP52:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[LAND_END22]] ], [ [[TMP62:%.*]], [[COND_END27:%.*]] ]
// CHECK1-NEXT: [[TMP53:%.*]] = bitcast float* [[ATOMIC_TEMP]] to i32*
// CHECK1-NEXT: [[TMP54:%.*]] = bitcast i32 [[TMP52]] to float
// CHECK1-NEXT: store float [[TMP54]], float* [[TMP]], align 4
// CHECK1-NEXT: [[TMP55:%.*]] = load float, float* [[TMP]], align 4
// CHECK1-NEXT: [[TMP56:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK1-NEXT: [[CMP24:%.*]] = fcmp olt float [[TMP55]], [[TMP56]]
// CHECK1-NEXT: br i1 [[CMP24]], label [[COND_TRUE25:%.*]], label [[COND_FALSE26:%.*]]
// CHECK1: cond.true25:
// CHECK1-NEXT: [[TMP57:%.*]] = load float, float* [[TMP]], align 4
// CHECK1-NEXT: br label [[COND_END27]]
// CHECK1: cond.false26:
// CHECK1-NEXT: [[TMP58:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK1-NEXT: br label [[COND_END27]]
// CHECK1: cond.end27:
// CHECK1-NEXT: [[COND28:%.*]] = phi float [ [[TMP57]], [[COND_TRUE25]] ], [ [[TMP58]], [[COND_FALSE26]] ]
// CHECK1-NEXT: store float [[COND28]], float* [[ATOMIC_TEMP]], align 4
// CHECK1-NEXT: [[TMP59:%.*]] = load i32, i32* [[TMP53]], align 4
// CHECK1-NEXT: [[TMP60:%.*]] = bitcast float* [[TMP3]] to i32*
// CHECK1-NEXT: [[TMP61:%.*]] = cmpxchg i32* [[TMP60]], i32 [[TMP52]], i32 [[TMP59]] monotonic monotonic, align 4
// CHECK1-NEXT: [[TMP62]] = extractvalue { i32, i1 } [[TMP61]], 0
// CHECK1-NEXT: [[TMP63:%.*]] = extractvalue { i32, i1 } [[TMP61]], 1
// CHECK1-NEXT: br i1 [[TMP63]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR14]]) #[[ATTR4]]
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR3]]) #[[ATTR4]]
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP7]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 4
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to float*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to float*
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S*
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S*
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S*
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
// CHECK1-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S*
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
// CHECK1-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i8* [[TMP25]] to float*
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
// CHECK1-NEXT: [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8* [[TMP28]] to float*
// CHECK1-NEXT: [[TMP30:%.*]] = load float, float* [[TMP11]], align 4
// CHECK1-NEXT: [[TMP31:%.*]] = load float, float* [[TMP8]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP30]], [[TMP31]]
// CHECK1-NEXT: store float [[ADD]], float* [[TMP11]], align 4
// CHECK1-NEXT: [[CALL:%.*]] = call noundef nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[TMP14]])
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast %struct.S* [[TMP17]] to i8*
// CHECK1-NEXT: [[TMP33:%.*]] = bitcast %struct.S* [[CALL]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
// CHECK1-NEXT: [[CALL2:%.*]] = call noundef float @_ZN1SIfEcvfEv(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TMP23]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = fcmp une float [[CALL2]], 0.000000e+00
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK1: land.rhs:
// CHECK1-NEXT: [[CALL3:%.*]] = call noundef float @_ZN1SIfEcvfEv(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TMP20]])
// CHECK1-NEXT: [[TOBOOL4:%.*]] = fcmp une float [[CALL3]], 0.000000e+00
// CHECK1-NEXT: br label [[LAND_END]]
// CHECK1: land.end:
// CHECK1-NEXT: [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
// CHECK1-NEXT: [[CONV:%.*]] = uitofp i1 [[TMP34]] to float
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[REF_TMP]], float noundef [[CONV]])
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast %struct.S* [[TMP23]] to i8*
// CHECK1-NEXT: [[TMP36:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP37:%.*]] = load float, float* [[TMP29]], align 4
// CHECK1-NEXT: [[TMP38:%.*]] = load float, float* [[TMP26]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = fcmp olt float [[TMP37]], [[TMP38]]
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP39:%.*]] = load float, float* [[TMP29]], align 4
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP40:%.*]] = load float, float* [[TMP26]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi float [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
// CHECK1-NEXT: store float [[COND]], float* [[TMP29]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEanERKS0_
// CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret %struct.S* [[THIS1]]
//
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEcvfEv
// CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR6]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret float 0.000000e+00
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
// CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIfED2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK1-SAME: () #[[ATTR6]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[T:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
// CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4
// CHECK1-NEXT: [[VAR1:%.*]] = alloca [[STRUCT_S_0]], align 4
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK1-NEXT: store i32 0, i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
// CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef 1)
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]], i32 noundef 3)
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR1]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.S.0*, %struct.S.0*, i32*, [2 x i32]*, [2 x %struct.S.0]*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[T_VAR]], %struct.S.0* [[VAR]], %struct.S.0* [[VAR1]], i32* [[T_VAR1]], [2 x i32]* [[VEC]], [2 x %struct.S.0]* [[S_ARR]])
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4]]
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done1:
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP2]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
// CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP0:%.*]] = load volatile double, double* @g, align 8
// CHECK1-NEXT: [[CONV:%.*]] = fptrunc double [[TMP0]] to float
// CHECK1-NEXT: store float [[CONV]], float* [[F]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
// CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to double
// CHECK1-NEXT: [[TMP1:%.*]] = load volatile double, double* @g, align 8
// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV]], [[TMP1]]
// CHECK1-NEXT: [[CONV2:%.*]] = fptrunc double [[ADD]] to float
// CHECK1-NEXT: store float [[CONV2]], float* [[F]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
// CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK1-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]])
// CHECK1-NEXT: ret void
//
//
673 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
674 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR1:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]], [2 x i32]* noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S.0]* noundef nonnull align 4 dereferenceable(8) [[S_ARR:%.*]]) #[[ATTR3]] {
675 // CHECK1-NEXT: entry:
676 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
677 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
678 // CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
679 // CHECK1-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
680 // CHECK1-NEXT: [[VAR1_ADDR:%.*]] = alloca %struct.S.0*, align 8
681 // CHECK1-NEXT: [[T_VAR1_ADDR:%.*]] = alloca i32*, align 8
682 // CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
683 // CHECK1-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
684 // CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
685 // CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
686 // CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
687 // CHECK1-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
688 // CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
689 // CHECK1-NEXT: [[T_VAR2:%.*]] = alloca i32, align 4
690 // CHECK1-NEXT: [[VAR3:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
691 // CHECK1-NEXT: [[VAR14:%.*]] = alloca [[STRUCT_S_0]], align 4
692 // CHECK1-NEXT: [[T_VAR15:%.*]] = alloca i32, align 4
693 // CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
694 // CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S_0]], align 4
695 // CHECK1-NEXT: [[REF_TMP13:%.*]] = alloca [[STRUCT_S_0]], align 4
696 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
697 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
698 // CHECK1-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
699 // CHECK1-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
700 // CHECK1-NEXT: store %struct.S.0* [[VAR1]], %struct.S.0** [[VAR1_ADDR]], align 8
701 // CHECK1-NEXT: store i32* [[T_VAR1]], i32** [[T_VAR1_ADDR]], align 8
702 // CHECK1-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
703 // CHECK1-NEXT: store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
704 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
705 // CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
706 // CHECK1-NEXT: [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR1_ADDR]], align 8
707 // CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[T_VAR1_ADDR]], align 8
708 // CHECK1-NEXT: [[TMP4:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
709 // CHECK1-NEXT: [[TMP5:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
710 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
711 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
712 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
713 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
714 // CHECK1-NEXT: store i32 0, i32* [[T_VAR2]], align 4
715 // CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR3]])
716 // CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR14]])
717 // CHECK1-NEXT: store i32 2147483647, i32* [[T_VAR15]], align 4
718 // CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
719 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
720 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
721 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
722 // CHECK1-NEXT: [[TMP9:%.*]] = icmp slt i32 [[TMP8]], 1
723 // CHECK1-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 1
724 // CHECK1-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
725 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
726 // CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
727 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
728 // CHECK1: omp.inner.for.cond:
729 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
730 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
731 // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
732 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
733 // CHECK1: omp.inner.for.body:
734 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
735 // CHECK1-NEXT: switch i32 [[TMP14]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
736 // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
737 // CHECK1-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE6:%.*]]
738 // CHECK1-NEXT: ]
739 // CHECK1: .omp.sections.case:
740 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[T_VAR2]], align 4
741 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP4]], i64 0, i64 0
742 // CHECK1-NEXT: store i32 [[TMP15]], i32* [[ARRAYIDX]], align 4
743 // CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
744 // CHECK1: .omp.sections.case6:
745 // CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP5]], i64 0, i64 0
746 // CHECK1-NEXT: [[TMP16:%.*]] = bitcast %struct.S.0* [[ARRAYIDX7]] to i8*
747 // CHECK1-NEXT: [[TMP17:%.*]] = bitcast %struct.S.0* [[VAR3]] to i8*
748 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP16]], i8* align 4 [[TMP17]], i64 4, i1 false)
749 // CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
750 // CHECK1: .omp.sections.exit:
751 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
752 // CHECK1: omp.inner.for.inc:
753 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
754 // CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP18]], 1
755 // CHECK1-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
756 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
757 // CHECK1: omp.inner.for.end:
758 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]])
759 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
760 // CHECK1-NEXT: [[TMP20:%.*]] = bitcast i32* [[T_VAR2]] to i8*
761 // CHECK1-NEXT: store i8* [[TMP20]], i8** [[TMP19]], align 8
762 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
763 // CHECK1-NEXT: [[TMP22:%.*]] = bitcast %struct.S.0* [[VAR3]] to i8*
764 // CHECK1-NEXT: store i8* [[TMP22]], i8** [[TMP21]], align 8
765 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
766 // CHECK1-NEXT: [[TMP24:%.*]] = bitcast %struct.S.0* [[VAR14]] to i8*
767 // CHECK1-NEXT: store i8* [[TMP24]], i8** [[TMP23]], align 8
768 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
769 // CHECK1-NEXT: [[TMP26:%.*]] = bitcast i32* [[T_VAR15]] to i8*
770 // CHECK1-NEXT: store i8* [[TMP26]], i8** [[TMP25]], align 8
771 // CHECK1-NEXT: [[TMP27:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
772 // CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], i32 4, i64 32, i8* [[TMP27]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
773 // CHECK1-NEXT: switch i32 [[TMP28]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
774 // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
775 // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
776 // CHECK1-NEXT: ]
777 // CHECK1: .omp.reduction.case1:
778 // CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP0]], align 4
779 // CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[T_VAR2]], align 4
780 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
781 // CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
782 // CHECK1-NEXT: [[CALL:%.*]] = call noundef nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR3]])
783 // CHECK1-NEXT: [[TMP31:%.*]] = bitcast %struct.S.0* [[TMP1]] to i8*
784 // CHECK1-NEXT: [[TMP32:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
785 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP31]], i8* align 4 [[TMP32]], i64 4, i1 false)
786 // CHECK1-NEXT: [[CALL8:%.*]] = call noundef i32 @_ZN1SIiEcviEv(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP2]])
787 // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[CALL8]], 0
788 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
789 // CHECK1: land.rhs:
790 // CHECK1-NEXT: [[CALL9:%.*]] = call noundef i32 @_ZN1SIiEcviEv(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR14]])
791 // CHECK1-NEXT: [[TOBOOL10:%.*]] = icmp ne i32 [[CALL9]], 0
792 // CHECK1-NEXT: br label [[LAND_END]]
// CHECK1: land.end:
// CHECK1-NEXT: [[TMP33:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL10]], [[LAND_RHS]] ]
// CHECK1-NEXT: [[CONV:%.*]] = zext i1 [[TMP33]] to i32
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 noundef [[CONV]])
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast %struct.S.0* [[TMP2]] to i8*
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP34]], i8* align 4 [[TMP35]], i64 4, i1 false)
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[T_VAR15]], align 4
// CHECK1-NEXT: [[CMP11:%.*]] = icmp slt i32 [[TMP36]], [[TMP37]]
// CHECK1-NEXT: br i1 [[CMP11]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, i32* [[T_VAR15]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP38]], [[COND_TRUE]] ], [ [[TMP39]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[TMP3]], align 4
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP40:%.*]] = load i32, i32* [[T_VAR2]], align 4
// CHECK1-NEXT: [[TMP41:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP40]] monotonic, align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: [[CALL12:%.*]] = call noundef nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP1]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK1-NEXT: [[TMP42:%.*]] = bitcast %struct.S.0* [[TMP1]] to i8*
// CHECK1-NEXT: [[TMP43:%.*]] = bitcast %struct.S.0* [[CALL12]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP42]], i8* align 4 [[TMP43]], i64 4, i1 false)
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: [[CALL14:%.*]] = call noundef i32 @_ZN1SIiEcviEv(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP2]])
// CHECK1-NEXT: [[TOBOOL15:%.*]] = icmp ne i32 [[CALL14]], 0
// CHECK1-NEXT: br i1 [[TOBOOL15]], label [[LAND_RHS16:%.*]], label [[LAND_END19:%.*]]
// CHECK1: land.rhs16:
// CHECK1-NEXT: [[CALL17:%.*]] = call noundef i32 @_ZN1SIiEcviEv(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK1-NEXT: [[TOBOOL18:%.*]] = icmp ne i32 [[CALL17]], 0
// CHECK1-NEXT: br label [[LAND_END19]]
// CHECK1: land.end19:
// CHECK1-NEXT: [[TMP44:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE2]] ], [ [[TOBOOL18]], [[LAND_RHS16]] ]
// CHECK1-NEXT: [[CONV20:%.*]] = zext i1 [[TMP44]] to i32
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[REF_TMP13]], i32 noundef [[CONV20]])
// CHECK1-NEXT: [[TMP45:%.*]] = bitcast %struct.S.0* [[TMP2]] to i8*
// CHECK1-NEXT: [[TMP46:%.*]] = bitcast %struct.S.0* [[REF_TMP13]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP45]], i8* align 4 [[TMP46]], i64 4, i1 false)
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[REF_TMP13]]) #[[ATTR4]]
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[T_VAR15]], align 4
// CHECK1-NEXT: [[TMP48:%.*]] = atomicrmw min i32* [[TMP3]], i32 [[TMP47]] monotonic, align 4
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR14]]) #[[ATTR4]]
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR3]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
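// The tree-reduce combiner below folds one thread's private copies into
// another's, in reduction-clause order: signed add for t_var, operator& for
// var, a logical-and materialized through an S<int> temporary for var1, and
// a signed min for t_var1.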
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR5]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S.0*
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S.0*
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S.0*
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
// CHECK1-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S.0*
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
// CHECK1-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i8* [[TMP25]] to i32*
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
// CHECK1-NEXT: [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8* [[TMP28]] to i32*
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK1-NEXT: [[CALL:%.*]] = call noundef nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP14]])
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast %struct.S.0* [[TMP17]] to i8*
// CHECK1-NEXT: [[TMP33:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
// CHECK1-NEXT: [[CALL2:%.*]] = call noundef i32 @_ZN1SIiEcviEv(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP23]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[CALL2]], 0
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK1: land.rhs:
// CHECK1-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZN1SIiEcviEv(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP20]])
// CHECK1-NEXT: [[TOBOOL4:%.*]] = icmp ne i32 [[CALL3]], 0
// CHECK1-NEXT: br label [[LAND_END]]
// CHECK1: land.end:
// CHECK1-NEXT: [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
// CHECK1-NEXT: [[CONV:%.*]] = zext i1 [[TMP34]] to i32
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 noundef [[CONV]])
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP23]] to i8*
// CHECK1-NEXT: [[TMP36:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP29]], align 4
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[TMP26]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP37]], [[TMP38]]
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, i32* [[TMP29]], align 4
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP40:%.*]] = load i32, i32* [[TMP26]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[TMP29]], align 4
// CHECK1-NEXT: ret void
//
//
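// S<T>::operator& returns *this unchanged, so the '&' combination above
// degenerates into a self-memcpy of the LHS element.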
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEanERKS0_
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR6]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %struct.S.0* [[TMP0]], %struct.S.0** [[DOTADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret %struct.S.0* [[THIS1]]
//
//
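// operator int() always yields 0, so every '&&' combination takes the false
// arm of its land.end phi and reconstructs the element from S(0).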
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEcviEv
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR6]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
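// Both S<int> constructors read the volatile global 'g' and truncate the
// floating-point result to int when initializing the 'f' member.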
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP0:%.*]] = load volatile double, double* @g, align 8
// CHECK1-NEXT: [[CONV:%.*]] = fptosi double [[TMP0]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[F]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to double
// CHECK1-NEXT: [[TMP1:%.*]] = load volatile double, double* @g, align 8
// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV]], [[TMP1]]
// CHECK1-NEXT: [[CONV2:%.*]] = fptosi double [[ADD]] to i32
// CHECK1-NEXT: store i32 [[CONV2]], i32* [[F]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret void
//
//
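// CHECK3 covers the LAMBDA variant. 'g' is privatized as a local double in
// the outlined sections region; the second section builds a lambda capturing
// the private copy by reference.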
// CHECK3-LABEL: define {{[^@]+}}@main
// CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
// CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK3-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
// CHECK3-NEXT: ret i32 0
//
//
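// The sections loop is lowered with __kmpc_for_static_init_4, and the
// blocking reduction on 'g' goes through __kmpc_reduce; both reduction cases
// finish with __kmpc_end_reduce, followed by an implicit barrier.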
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[G:%.*]] = alloca double, align 8
// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK3-NEXT: store double 0.000000e+00, double* [[G]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 1
// CHECK3-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 1
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK3-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK3-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
// CHECK3-NEXT: ]
// CHECK3: .omp.sections.case:
// CHECK3-NEXT: store double 1.000000e+00, double* [[G]], align 8
// CHECK3-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK3: .omp.sections.case1:
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT: store double* [[G]], double** [[TMP9]], align 8
// CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
// CHECK3-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK3: .omp.sections.exit:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK3-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK3-NEXT: [[TMP12:%.*]] = bitcast double* [[G]] to i8*
// CHECK3-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK3-NEXT: [[TMP13:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]], i32 1, i64 8, i8* [[TMP13]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: switch i32 [[TMP14]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK3-NEXT: ]
// CHECK3: .omp.reduction.case1:
// CHECK3-NEXT: [[TMP15:%.*]] = load double, double* @g, align 8
// CHECK3-NEXT: [[TMP16:%.*]] = load double, double* [[G]], align 8
// CHECK3-NEXT: [[ADD:%.*]] = fadd double [[TMP15]], [[TMP16]]
// CHECK3-NEXT: store double [[ADD]], double* @g, align 8
// CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.case2:
// CHECK3-NEXT: [[TMP17:%.*]] = load double, double* [[G]], align 8
// CHECK3-NEXT: [[TMP18:%.*]] = atomicrmw fadd double* @g, double [[TMP17]] monotonic, align 8
// CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.default:
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP1]])
// CHECK3-NEXT: ret void
//
//
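// The matching reduction function adds the RHS thread's private copy of 'g'
// into the LHS copy.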
// CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK3-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double*
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double*
// CHECK3-NEXT: [[TMP12:%.*]] = load double, double* [[TMP11]], align 8
// CHECK3-NEXT: [[TMP13:%.*]] = load double, double* [[TMP8]], align 8
// CHECK3-NEXT: [[ADD:%.*]] = fadd double [[TMP12]], [[TMP13]]
// CHECK3-NEXT: store double [[ADD]], double* [[TMP11]], align 8
// CHECK3-NEXT: ret void
//
//
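// CHECK4 covers the BLOCKS variant. main invokes the global block literal,
// and its invoke function forks the outlined parallel region via
// __kmpc_fork_call.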
// CHECK4-LABEL: define {{[^@]+}}@main
// CHECK4-SAME: () #[[ATTR1:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i8*, i8** getelementptr inbounds ([[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to %struct.__block_literal_generic*), i32 0, i32 3), align 8
// CHECK4-NEXT: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP1]](i8* noundef bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to i8*))
// CHECK4-NEXT: ret i32 0
//
//
// CHECK4-LABEL: define {{[^@]+}}@__main_block_invoke
// CHECK4-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK4-NEXT: ret void
//
//
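// Here the second section materializes an on-stack block literal that
// captures the private copy of 'g' by value before calling its invoke
// function.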
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[G:%.*]] = alloca double, align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, align 8
// CHECK4-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK4-NEXT: store double 0.000000e+00, double* [[G]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 1
// CHECK4-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 1
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK4-NEXT: switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK4-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK4-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
// CHECK4-NEXT: ]
// CHECK4: .omp.sections.case:
// CHECK4-NEXT: store double 1.000000e+00, double* [[G]], align 8
// CHECK4-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK4: .omp.sections.case1:
// CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 0
// CHECK4-NEXT: store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
// CHECK4-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 1
// CHECK4-NEXT: store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
// CHECK4-NEXT: [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 2
// CHECK4-NEXT: store i32 0, i32* [[BLOCK_RESERVED]], align 4
// CHECK4-NEXT: [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 3
// CHECK4-NEXT: store i8* bitcast (void (i8*)* @_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 8
// CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 4
// CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.1 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: [[TMP9:%.*]] = load volatile double, double* [[G]], align 8
// CHECK4-NEXT: store volatile double [[TMP9]], double* [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT: [[TMP10:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP10]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP12:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP11]], align 8
// CHECK4-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP14]](i8* noundef [[TMP12]])
// CHECK4-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK4: .omp.sections.exit:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK4-NEXT: [[INC:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK4-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK4-NEXT: [[TMP17:%.*]] = bitcast double* [[G]] to i8*
// CHECK4-NEXT: store i8* [[TMP17]], i8** [[TMP16]], align 8
// CHECK4-NEXT: [[TMP18:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK4-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]], i32 1, i64 8, i8* [[TMP18]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK4-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK4-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK4-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK4-NEXT: ]
// CHECK4: .omp.reduction.case1:
// CHECK4-NEXT: [[TMP20:%.*]] = load double, double* @g, align 8
// CHECK4-NEXT: [[TMP21:%.*]] = load double, double* [[G]], align 8
// CHECK4-NEXT: [[ADD:%.*]] = fadd double [[TMP20]], [[TMP21]]
// CHECK4-NEXT: store double [[ADD]], double* @g, align 8
// CHECK4-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK4-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK4: .omp.reduction.case2:
// CHECK4-NEXT: [[TMP22:%.*]] = load double, double* [[G]], align 8
// CHECK4-NEXT: [[TMP23:%.*]] = atomicrmw fadd double* @g, double [[TMP22]] monotonic, align 8
// CHECK4-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK4-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK4: .omp.reduction.default:
// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP1]])
// CHECK4-NEXT: ret void
//
//
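// The block body writes 2.0 into the double captured inside the block
// literal, not into the original 'g'.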
// CHECK4-LABEL: define {{[^@]+}}@_block_invoke
// CHECK4-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>*, align 8
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>*
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: store double 2.000000e+00, double* [[BLOCK_CAPTURE_ADDR]], align 8
// CHECK4-NEXT: ret void
//
//
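// Same combiner shape as the lambda variant: an fadd of the two private
// copies of 'g'.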
// CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK4-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK4-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK4-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK4-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double*
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK4-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK4-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double*
// CHECK4-NEXT: [[TMP12:%.*]] = load double, double* [[TMP11]], align 8
// CHECK4-NEXT: [[TMP13:%.*]] = load double, double* [[TMP8]], align 8
// CHECK4-NEXT: [[ADD:%.*]] = fadd double [[TMP12]], [[TMP13]]
// CHECK4-NEXT: store double [[ADD]], double* [[TMP11]], align 8
// CHECK4-NEXT: ret void
//