1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK1
3 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
4 // RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK2
5 // RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK3
6 // RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK4
7 
8 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
9 // RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
10 // RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
11 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
12 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
13 // expected-no-diagnostics
14 #ifndef HEADER
15 #define HEADER
16 
17 volatile double g;
18 
template <class T>
struct S {
  T f;
  // Reads the global `g` so construction is never constant-foldable.
  S(T a) : f(a + g) {}
  S() : f(g) {}
  // Conversion to T: used when S appears in the `&&`-reduction's boolean
  // test (the generated IR calls this and compares the result against 0).
  operator T() { return T(); }
  // Overloaded bitwise-& so `reduction(&:var)` on an S is well-formed;
  // intentionally a no-op combiner that returns *this.
  S &operator&(const S &) { return *this; }
  // Non-trivial destructor forces the codegen under test to emit
  // destructor calls for the private reduction copies.
  ~S() {}
};
28 
29 
// Templated driver: exercises the sections-reduction codegen path for a
// dependent type (instantiated as tmain<int> from main). The `nowait`
// clause selects the __kmpc_reduce_nowait runtime entry instead of the
// blocking variant used in main.
template <typename T>
T tmain() {
  T t;
  S<T> test;
  T t_var = T(), t_var1;
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var(3), var1;
#pragma omp parallel
#pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1) nowait
  {
    // First (implicit) section.
    vec[0] = t_var;
#pragma omp section
    s_arr[0] = var;
  }
  return T();
}
47 
// Three build variants, selected by the RUN lines above:
//  - LAMBDA: reduction variable `g` captured by reference in nested lambdas.
//  - BLOCKS: same shape using Objective-C-style blocks (-fblocks).
//  - default: concrete S<float> mirror of tmain, without `nowait`, so the
//    blocking __kmpc_reduce / __kmpc_end_reduce path is emitted.
int main() {
#ifdef LAMBDA
  [&]() {
#pragma omp parallel
#pragma omp sections reduction(+:g)
    {

    // Reduction list for runtime.

    g = 1;

#pragma omp section
    // Inner lambda re-captures `g`; the store must target the private
    // reduction copy inside the section.
    [&]() {
      g = 2;
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  ^{
#pragma omp parallel
#pragma omp sections reduction(-:g)
    {

    // Reduction list for runtime.

    g = 1;

#pragma omp section
    // Inner block: same capture-through-block check as the lambda variant.
    ^{
      g = 2;
    }();
  }
  }();
  return 0;
#else
  S<float> test;
  float t_var = 0, t_var1;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3), var1;
#pragma omp parallel
#pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
  {
    // Single implicit section touching every reduction variable.
    {
    vec[0] = t_var;
    s_arr[0] = var;
    vec[1] = t_var1;
    s_arr[1] = var1;
    }
  }
  // Also instantiate the templated path checked by the other prefixes.
  return tmain<int>();
#endif
}
102 
103 
104 
105 
106 
107 
108 
109 
110 
111 // Reduction list for runtime.
112 
113 
114 
115 // For + reduction operation initial value of private variable is 0.
116 
117 // For & reduction operation initial value of private variable is ones in all bits.
118 
119 // For && reduction operation initial value of private variable is 1.0.
120 
// For min reduction operation initial value of private variable is largest representable value.
122 
123 // Skip checks for internal operations.
124 
125 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
126 
127 
128 // res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);
129 
130 
131 // switch(res)
132 
133 // case 1:
134 // t_var += t_var_reduction;
135 
136 // var = var.operator &(var_reduction);
137 
138 // var1 = var1.operator &&(var1_reduction);
139 
140 // t_var1 = min(t_var1, t_var1_reduction);
141 
142 // __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);
143 
144 // break;
145 
146 // case 2:
147 // t_var += t_var_reduction;
148 
149 // var = var.operator &(var_reduction);
150 
151 // var1 = var1.operator &&(var1_reduction);
152 
153 // t_var1 = min(t_var1, t_var1_reduction);
154 
155 // break;
156 
157 // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
158 //  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
159 //  ...
160 //  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
161 //  *(Type<n>-1*)rhs[<n>-1]);
162 // }
163 // t_var_lhs = (i{{[0-9]+}}*)lhs[0];
164 // t_var_rhs = (i{{[0-9]+}}*)rhs[0];
165 
166 // var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
167 // var_rhs = (S<i{{[0-9]+}}>*)rhs[1];
168 
169 // var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
170 // var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];
171 
172 // t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
173 // t_var1_rhs = (i{{[0-9]+}}*)rhs[3];
174 
175 // t_var_lhs += t_var_rhs;
176 
177 // var_lhs = var_lhs.operator &(var_rhs);
178 
179 // var1_lhs = var1_lhs.operator &&(var1_rhs);
180 
181 // t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
182 
183 #endif
184 // CHECK1-LABEL: define {{[^@]+}}@main
185 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
186 // CHECK1-NEXT:  entry:
187 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
188 // CHECK1-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
189 // CHECK1-NEXT:    [[T_VAR:%.*]] = alloca float, align 4
190 // CHECK1-NEXT:    [[T_VAR1:%.*]] = alloca float, align 4
191 // CHECK1-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
192 // CHECK1-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
193 // CHECK1-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S]], align 4
194 // CHECK1-NEXT:    [[VAR1:%.*]] = alloca [[STRUCT_S]], align 4
195 // CHECK1-NEXT:    [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
196 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
197 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
198 // CHECK1-NEXT:    store float 0.000000e+00, float* [[T_VAR]], align 4
199 // CHECK1-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
200 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
201 // CHECK1-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
202 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
203 // CHECK1-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
204 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
205 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]], float 3.000000e+00)
206 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
207 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
208 // CHECK1-NEXT:    store float* [[T_VAR]], float** [[TMP1]], align 8
209 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
210 // CHECK1-NEXT:    store %struct.S* [[VAR]], %struct.S** [[TMP2]], align 8
211 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
212 // CHECK1-NEXT:    store %struct.S* [[VAR1]], %struct.S** [[TMP3]], align 8
213 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 3
214 // CHECK1-NEXT:    store float* [[T_VAR1]], float** [[TMP4]], align 8
215 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 4
216 // CHECK1-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP5]], align 8
217 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 5
218 // CHECK1-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[TMP6]], align 8
219 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
220 // CHECK1-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
221 // CHECK1-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
222 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4:[0-9]+]]
223 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
224 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
225 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
226 // CHECK1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
227 // CHECK1:       arraydestroy.body:
228 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP7]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
229 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
230 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
231 // CHECK1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
232 // CHECK1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
233 // CHECK1:       arraydestroy.done1:
234 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
235 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[RETVAL]], align 4
236 // CHECK1-NEXT:    ret i32 [[TMP8]]
237 //
238 //
239 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
240 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
241 // CHECK1-NEXT:  entry:
242 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
243 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
244 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
245 // CHECK1-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
246 // CHECK1-NEXT:    ret void
247 //
248 //
249 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
250 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
251 // CHECK1-NEXT:  entry:
252 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
253 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
254 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
255 // CHECK1-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
256 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
257 // CHECK1-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
258 // CHECK1-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
259 // CHECK1-NEXT:    ret void
260 //
261 //
262 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
263 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] {
264 // CHECK1-NEXT:  entry:
265 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
266 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
267 // CHECK1-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
268 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
269 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
270 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
271 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
272 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
273 // CHECK1-NEXT:    [[T_VAR:%.*]] = alloca float, align 4
274 // CHECK1-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S:%.*]], align 4
275 // CHECK1-NEXT:    [[VAR1:%.*]] = alloca [[STRUCT_S]], align 4
276 // CHECK1-NEXT:    [[T_VAR1:%.*]] = alloca float, align 4
277 // CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
278 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S]], align 4
279 // CHECK1-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4
280 // CHECK1-NEXT:    [[TMP:%.*]] = alloca float, align 4
281 // CHECK1-NEXT:    [[REF_TMP12:%.*]] = alloca [[STRUCT_S]], align 4
282 // CHECK1-NEXT:    [[ATOMIC_TEMP22:%.*]] = alloca float, align 4
283 // CHECK1-NEXT:    [[_TMP23:%.*]] = alloca float, align 4
284 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
285 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
286 // CHECK1-NEXT:    store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
287 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
288 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
289 // CHECK1-NEXT:    [[TMP2:%.*]] = load float*, float** [[TMP1]], align 8
290 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
291 // CHECK1-NEXT:    [[TMP4:%.*]] = load %struct.S*, %struct.S** [[TMP3]], align 8
292 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 2
293 // CHECK1-NEXT:    [[TMP6:%.*]] = load %struct.S*, %struct.S** [[TMP5]], align 8
294 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 3
295 // CHECK1-NEXT:    [[TMP8:%.*]] = load float*, float** [[TMP7]], align 8
296 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 4
297 // CHECK1-NEXT:    [[TMP10:%.*]] = load [2 x i32]*, [2 x i32]** [[TMP9]], align 8
298 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 5
299 // CHECK1-NEXT:    [[TMP12:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[TMP11]], align 8
300 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
301 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_UB_]], align 4
302 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
303 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
304 // CHECK1-NEXT:    store float 0.000000e+00, float* [[T_VAR]], align 4
305 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]])
306 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
307 // CHECK1-NEXT:    store float 0x47EFFFFFE0000000, float* [[T_VAR1]], align 4
308 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
309 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
310 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP14]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
311 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
312 // CHECK1-NEXT:    [[TMP16:%.*]] = icmp slt i32 [[TMP15]], 0
313 // CHECK1-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP15]], i32 0
314 // CHECK1-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
315 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
316 // CHECK1-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
317 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
318 // CHECK1:       omp.inner.for.cond:
319 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
320 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
321 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
322 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
323 // CHECK1:       omp.inner.for.body:
324 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
325 // CHECK1-NEXT:    switch i32 [[TMP21]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
326 // CHECK1-NEXT:    i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
327 // CHECK1-NEXT:    ]
328 // CHECK1:       .omp.sections.case:
329 // CHECK1-NEXT:    [[TMP22:%.*]] = load float, float* [[T_VAR]], align 4
330 // CHECK1-NEXT:    [[CONV:%.*]] = fptosi float [[TMP22]] to i32
331 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP10]], i64 0, i64 0
332 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[ARRAYIDX]], align 4
333 // CHECK1-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP12]], i64 0, i64 0
334 // CHECK1-NEXT:    [[TMP23:%.*]] = bitcast %struct.S* [[ARRAYIDX1]] to i8*
335 // CHECK1-NEXT:    [[TMP24:%.*]] = bitcast %struct.S* [[VAR]] to i8*
336 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP23]], i8* align 4 [[TMP24]], i64 4, i1 false)
337 // CHECK1-NEXT:    [[TMP25:%.*]] = load float, float* [[T_VAR1]], align 4
338 // CHECK1-NEXT:    [[CONV2:%.*]] = fptosi float [[TMP25]] to i32
339 // CHECK1-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP10]], i64 0, i64 1
340 // CHECK1-NEXT:    store i32 [[CONV2]], i32* [[ARRAYIDX3]], align 4
341 // CHECK1-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP12]], i64 0, i64 1
342 // CHECK1-NEXT:    [[TMP26:%.*]] = bitcast %struct.S* [[ARRAYIDX4]] to i8*
343 // CHECK1-NEXT:    [[TMP27:%.*]] = bitcast %struct.S* [[VAR1]] to i8*
344 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP26]], i8* align 4 [[TMP27]], i64 4, i1 false)
345 // CHECK1-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
346 // CHECK1:       .omp.sections.exit:
347 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
348 // CHECK1:       omp.inner.for.inc:
349 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
350 // CHECK1-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP28]], 1
351 // CHECK1-NEXT:    store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
352 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
353 // CHECK1:       omp.inner.for.end:
354 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP14]])
355 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
356 // CHECK1-NEXT:    [[TMP30:%.*]] = bitcast float* [[T_VAR]] to i8*
357 // CHECK1-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 8
358 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
359 // CHECK1-NEXT:    [[TMP32:%.*]] = bitcast %struct.S* [[VAR]] to i8*
360 // CHECK1-NEXT:    store i8* [[TMP32]], i8** [[TMP31]], align 8
361 // CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
362 // CHECK1-NEXT:    [[TMP34:%.*]] = bitcast %struct.S* [[VAR1]] to i8*
363 // CHECK1-NEXT:    store i8* [[TMP34]], i8** [[TMP33]], align 8
364 // CHECK1-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
365 // CHECK1-NEXT:    [[TMP36:%.*]] = bitcast float* [[T_VAR1]] to i8*
366 // CHECK1-NEXT:    store i8* [[TMP36]], i8** [[TMP35]], align 8
367 // CHECK1-NEXT:    [[TMP37:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
368 // CHECK1-NEXT:    [[TMP38:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP14]], i32 4, i64 32, i8* [[TMP37]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
369 // CHECK1-NEXT:    switch i32 [[TMP38]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
370 // CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
371 // CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
372 // CHECK1-NEXT:    ]
373 // CHECK1:       .omp.reduction.case1:
374 // CHECK1-NEXT:    [[TMP39:%.*]] = load float, float* [[TMP2]], align 4
375 // CHECK1-NEXT:    [[TMP40:%.*]] = load float, float* [[T_VAR]], align 4
376 // CHECK1-NEXT:    [[ADD:%.*]] = fadd float [[TMP39]], [[TMP40]]
377 // CHECK1-NEXT:    store float [[ADD]], float* [[TMP2]], align 4
378 // CHECK1-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP4]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR]])
379 // CHECK1-NEXT:    [[TMP41:%.*]] = bitcast %struct.S* [[TMP4]] to i8*
380 // CHECK1-NEXT:    [[TMP42:%.*]] = bitcast %struct.S* [[CALL]] to i8*
381 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP41]], i8* align 4 [[TMP42]], i64 4, i1 false)
382 // CHECK1-NEXT:    [[CALL5:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP6]])
383 // CHECK1-NEXT:    [[TOBOOL:%.*]] = fcmp une float [[CALL5]], 0.000000e+00
384 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
385 // CHECK1:       land.rhs:
386 // CHECK1-NEXT:    [[CALL6:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
387 // CHECK1-NEXT:    [[TOBOOL7:%.*]] = fcmp une float [[CALL6]], 0.000000e+00
388 // CHECK1-NEXT:    br label [[LAND_END]]
389 // CHECK1:       land.end:
390 // CHECK1-NEXT:    [[TMP43:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL7]], [[LAND_RHS]] ]
391 // CHECK1-NEXT:    [[CONV8:%.*]] = uitofp i1 [[TMP43]] to float
392 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV8]])
393 // CHECK1-NEXT:    [[TMP44:%.*]] = bitcast %struct.S* [[TMP6]] to i8*
394 // CHECK1-NEXT:    [[TMP45:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
395 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP44]], i8* align 4 [[TMP45]], i64 4, i1 false)
396 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
397 // CHECK1-NEXT:    [[TMP46:%.*]] = load float, float* [[TMP8]], align 4
398 // CHECK1-NEXT:    [[TMP47:%.*]] = load float, float* [[T_VAR1]], align 4
399 // CHECK1-NEXT:    [[CMP9:%.*]] = fcmp olt float [[TMP46]], [[TMP47]]
400 // CHECK1-NEXT:    br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
401 // CHECK1:       cond.true:
402 // CHECK1-NEXT:    [[TMP48:%.*]] = load float, float* [[TMP8]], align 4
403 // CHECK1-NEXT:    br label [[COND_END:%.*]]
404 // CHECK1:       cond.false:
405 // CHECK1-NEXT:    [[TMP49:%.*]] = load float, float* [[T_VAR1]], align 4
406 // CHECK1-NEXT:    br label [[COND_END]]
407 // CHECK1:       cond.end:
408 // CHECK1-NEXT:    [[COND:%.*]] = phi float [ [[TMP48]], [[COND_TRUE]] ], [ [[TMP49]], [[COND_FALSE]] ]
409 // CHECK1-NEXT:    store float [[COND]], float* [[TMP8]], align 4
410 // CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.reduction.var)
411 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
412 // CHECK1:       .omp.reduction.case2:
413 // CHECK1-NEXT:    [[TMP50:%.*]] = load float, float* [[T_VAR]], align 4
414 // CHECK1-NEXT:    [[TMP51:%.*]] = bitcast float* [[TMP2]] to i32*
415 // CHECK1-NEXT:    [[ATOMIC_LOAD:%.*]] = load atomic i32, i32* [[TMP51]] monotonic, align 4
416 // CHECK1-NEXT:    br label [[ATOMIC_CONT:%.*]]
417 // CHECK1:       atomic_cont:
418 // CHECK1-NEXT:    [[TMP52:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP60:%.*]], [[ATOMIC_CONT]] ]
419 // CHECK1-NEXT:    [[TMP53:%.*]] = bitcast float* [[ATOMIC_TEMP]] to i32*
420 // CHECK1-NEXT:    [[TMP54:%.*]] = bitcast i32 [[TMP52]] to float
421 // CHECK1-NEXT:    store float [[TMP54]], float* [[TMP]], align 4
422 // CHECK1-NEXT:    [[TMP55:%.*]] = load float, float* [[TMP]], align 4
423 // CHECK1-NEXT:    [[TMP56:%.*]] = load float, float* [[T_VAR]], align 4
424 // CHECK1-NEXT:    [[ADD10:%.*]] = fadd float [[TMP55]], [[TMP56]]
425 // CHECK1-NEXT:    store float [[ADD10]], float* [[ATOMIC_TEMP]], align 4
426 // CHECK1-NEXT:    [[TMP57:%.*]] = load i32, i32* [[TMP53]], align 4
427 // CHECK1-NEXT:    [[TMP58:%.*]] = bitcast float* [[TMP2]] to i32*
428 // CHECK1-NEXT:    [[TMP59:%.*]] = cmpxchg i32* [[TMP58]], i32 [[TMP52]], i32 [[TMP57]] monotonic monotonic, align 4
429 // CHECK1-NEXT:    [[TMP60]] = extractvalue { i32, i1 } [[TMP59]], 0
430 // CHECK1-NEXT:    [[TMP61:%.*]] = extractvalue { i32, i1 } [[TMP59]], 1
431 // CHECK1-NEXT:    br i1 [[TMP61]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
432 // CHECK1:       atomic_exit:
433 // CHECK1-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
434 // CHECK1-NEXT:    [[CALL11:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP4]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR]])
435 // CHECK1-NEXT:    [[TMP62:%.*]] = bitcast %struct.S* [[TMP4]] to i8*
436 // CHECK1-NEXT:    [[TMP63:%.*]] = bitcast %struct.S* [[CALL11]] to i8*
437 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP62]], i8* align 4 [[TMP63]], i64 4, i1 false)
438 // CHECK1-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
439 // CHECK1-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
440 // CHECK1-NEXT:    [[CALL13:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP6]])
441 // CHECK1-NEXT:    [[TOBOOL14:%.*]] = fcmp une float [[CALL13]], 0.000000e+00
442 // CHECK1-NEXT:    br i1 [[TOBOOL14]], label [[LAND_RHS15:%.*]], label [[LAND_END18:%.*]]
443 // CHECK1:       land.rhs15:
444 // CHECK1-NEXT:    [[CALL16:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
445 // CHECK1-NEXT:    [[TOBOOL17:%.*]] = fcmp une float [[CALL16]], 0.000000e+00
446 // CHECK1-NEXT:    br label [[LAND_END18]]
447 // CHECK1:       land.end18:
448 // CHECK1-NEXT:    [[TMP64:%.*]] = phi i1 [ false, [[ATOMIC_EXIT]] ], [ [[TOBOOL17]], [[LAND_RHS15]] ]
449 // CHECK1-NEXT:    [[CONV19:%.*]] = uitofp i1 [[TMP64]] to float
450 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP12]], float [[CONV19]])
451 // CHECK1-NEXT:    [[TMP65:%.*]] = bitcast %struct.S* [[TMP6]] to i8*
452 // CHECK1-NEXT:    [[TMP66:%.*]] = bitcast %struct.S* [[REF_TMP12]] to i8*
453 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP65]], i8* align 4 [[TMP66]], i64 4, i1 false)
454 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP12]]) #[[ATTR4]]
455 // CHECK1-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
456 // CHECK1-NEXT:    [[TMP67:%.*]] = load float, float* [[T_VAR1]], align 4
457 // CHECK1-NEXT:    [[TMP68:%.*]] = bitcast float* [[TMP8]] to i32*
458 // CHECK1-NEXT:    [[ATOMIC_LOAD20:%.*]] = load atomic i32, i32* [[TMP68]] monotonic, align 4
459 // CHECK1-NEXT:    br label [[ATOMIC_CONT21:%.*]]
460 // CHECK1:       atomic_cont21:
461 // CHECK1-NEXT:    [[TMP69:%.*]] = phi i32 [ [[ATOMIC_LOAD20]], [[LAND_END18]] ], [ [[TMP79:%.*]], [[COND_END27:%.*]] ]
462 // CHECK1-NEXT:    [[TMP70:%.*]] = bitcast float* [[ATOMIC_TEMP22]] to i32*
463 // CHECK1-NEXT:    [[TMP71:%.*]] = bitcast i32 [[TMP69]] to float
464 // CHECK1-NEXT:    store float [[TMP71]], float* [[_TMP23]], align 4
465 // CHECK1-NEXT:    [[TMP72:%.*]] = load float, float* [[_TMP23]], align 4
466 // CHECK1-NEXT:    [[TMP73:%.*]] = load float, float* [[T_VAR1]], align 4
467 // CHECK1-NEXT:    [[CMP24:%.*]] = fcmp olt float [[TMP72]], [[TMP73]]
468 // CHECK1-NEXT:    br i1 [[CMP24]], label [[COND_TRUE25:%.*]], label [[COND_FALSE26:%.*]]
469 // CHECK1:       cond.true25:
470 // CHECK1-NEXT:    [[TMP74:%.*]] = load float, float* [[_TMP23]], align 4
471 // CHECK1-NEXT:    br label [[COND_END27]]
472 // CHECK1:       cond.false26:
473 // CHECK1-NEXT:    [[TMP75:%.*]] = load float, float* [[T_VAR1]], align 4
474 // CHECK1-NEXT:    br label [[COND_END27]]
475 // CHECK1:       cond.end27:
476 // CHECK1-NEXT:    [[COND28:%.*]] = phi float [ [[TMP74]], [[COND_TRUE25]] ], [ [[TMP75]], [[COND_FALSE26]] ]
477 // CHECK1-NEXT:    store float [[COND28]], float* [[ATOMIC_TEMP22]], align 4
478 // CHECK1-NEXT:    [[TMP76:%.*]] = load i32, i32* [[TMP70]], align 4
479 // CHECK1-NEXT:    [[TMP77:%.*]] = bitcast float* [[TMP8]] to i32*
480 // CHECK1-NEXT:    [[TMP78:%.*]] = cmpxchg i32* [[TMP77]], i32 [[TMP69]], i32 [[TMP76]] monotonic monotonic, align 4
481 // CHECK1-NEXT:    [[TMP79]] = extractvalue { i32, i1 } [[TMP78]], 0
482 // CHECK1-NEXT:    [[TMP80:%.*]] = extractvalue { i32, i1 } [[TMP78]], 1
483 // CHECK1-NEXT:    br i1 [[TMP80]], label [[ATOMIC_EXIT29:%.*]], label [[ATOMIC_CONT21]]
484 // CHECK1:       atomic_exit29:
485 // CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.reduction.var)
486 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
487 // CHECK1:       .omp.reduction.default:
488 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4]]
489 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
490 // CHECK1-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP14]])
491 // CHECK1-NEXT:    ret void
492 //
493 //
494 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
495 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
496 // CHECK1-NEXT:  entry:
497 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
498 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
499 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 4
500 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
501 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
502 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
503 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
504 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
505 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
506 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
507 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
508 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to float*
509 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
510 // CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
511 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to float*
512 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
513 // CHECK1-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
514 // CHECK1-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S*
515 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
516 // CHECK1-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
517 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S*
518 // CHECK1-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
519 // CHECK1-NEXT:    [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
520 // CHECK1-NEXT:    [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S*
521 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
522 // CHECK1-NEXT:    [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
523 // CHECK1-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S*
524 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
525 // CHECK1-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
526 // CHECK1-NEXT:    [[TMP26:%.*]] = bitcast i8* [[TMP25]] to float*
527 // CHECK1-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
528 // CHECK1-NEXT:    [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
529 // CHECK1-NEXT:    [[TMP29:%.*]] = bitcast i8* [[TMP28]] to float*
530 // CHECK1-NEXT:    [[TMP30:%.*]] = load float, float* [[TMP11]], align 4
531 // CHECK1-NEXT:    [[TMP31:%.*]] = load float, float* [[TMP8]], align 4
532 // CHECK1-NEXT:    [[ADD:%.*]] = fadd float [[TMP30]], [[TMP31]]
533 // CHECK1-NEXT:    store float [[ADD]], float* [[TMP11]], align 4
534 // CHECK1-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP14]])
535 // CHECK1-NEXT:    [[TMP32:%.*]] = bitcast %struct.S* [[TMP17]] to i8*
536 // CHECK1-NEXT:    [[TMP33:%.*]] = bitcast %struct.S* [[CALL]] to i8*
537 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
538 // CHECK1-NEXT:    [[CALL2:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP23]])
539 // CHECK1-NEXT:    [[TOBOOL:%.*]] = fcmp une float [[CALL2]], 0.000000e+00
540 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
541 // CHECK1:       land.rhs:
542 // CHECK1-NEXT:    [[CALL3:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP20]])
543 // CHECK1-NEXT:    [[TOBOOL4:%.*]] = fcmp une float [[CALL3]], 0.000000e+00
544 // CHECK1-NEXT:    br label [[LAND_END]]
545 // CHECK1:       land.end:
546 // CHECK1-NEXT:    [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
547 // CHECK1-NEXT:    [[CONV:%.*]] = uitofp i1 [[TMP34]] to float
548 // CHECK1-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV]])
549 // CHECK1-NEXT:    [[TMP35:%.*]] = bitcast %struct.S* [[TMP23]] to i8*
550 // CHECK1-NEXT:    [[TMP36:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
551 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
552 // CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
553 // CHECK1-NEXT:    [[TMP37:%.*]] = load float, float* [[TMP29]], align 4
554 // CHECK1-NEXT:    [[TMP38:%.*]] = load float, float* [[TMP26]], align 4
555 // CHECK1-NEXT:    [[CMP:%.*]] = fcmp olt float [[TMP37]], [[TMP38]]
556 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
557 // CHECK1:       cond.true:
558 // CHECK1-NEXT:    [[TMP39:%.*]] = load float, float* [[TMP29]], align 4
559 // CHECK1-NEXT:    br label [[COND_END:%.*]]
560 // CHECK1:       cond.false:
561 // CHECK1-NEXT:    [[TMP40:%.*]] = load float, float* [[TMP26]], align 4
562 // CHECK1-NEXT:    br label [[COND_END]]
563 // CHECK1:       cond.end:
564 // CHECK1-NEXT:    [[COND:%.*]] = phi float [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
565 // CHECK1-NEXT:    store float [[COND]], float* [[TMP29]], align 4
566 // CHECK1-NEXT:    ret void
567 //
568 //
569 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEanERKS0_
570 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] align 2 {
571 // CHECK1-NEXT:  entry:
572 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
573 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S*, align 8
574 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
575 // CHECK1-NEXT:    store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
576 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
577 // CHECK1-NEXT:    ret %struct.S* [[THIS1]]
578 //
579 //
580 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEcvfEv
581 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR6]] align 2 {
582 // CHECK1-NEXT:  entry:
583 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
584 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
585 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
586 // CHECK1-NEXT:    ret float 0.000000e+00
587 //
588 //
589 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
590 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
591 // CHECK1-NEXT:  entry:
592 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
593 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
594 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
595 // CHECK1-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
596 // CHECK1-NEXT:    ret void
597 //
598 //
599 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
600 // CHECK1-SAME: () #[[ATTR6]] {
601 // CHECK1-NEXT:  entry:
602 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
603 // CHECK1-NEXT:    [[T:%.*]] = alloca i32, align 4
604 // CHECK1-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
605 // CHECK1-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
606 // CHECK1-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
607 // CHECK1-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
608 // CHECK1-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
609 // CHECK1-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4
610 // CHECK1-NEXT:    [[VAR1:%.*]] = alloca [[STRUCT_S_0]], align 4
611 // CHECK1-NEXT:    [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
612 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
613 // CHECK1-NEXT:    store i32 0, i32* [[T_VAR]], align 4
614 // CHECK1-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
615 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
616 // CHECK1-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
617 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
618 // CHECK1-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
619 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
620 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]], i32 3)
621 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]])
622 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
623 // CHECK1-NEXT:    store i32* [[T_VAR]], i32** [[TMP1]], align 8
624 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
625 // CHECK1-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[TMP2]], align 8
626 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
627 // CHECK1-NEXT:    store %struct.S.0* [[VAR1]], %struct.S.0** [[TMP3]], align 8
628 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 3
629 // CHECK1-NEXT:    store i32* [[T_VAR1]], i32** [[TMP4]], align 8
630 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 4
631 // CHECK1-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP5]], align 8
632 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 5
633 // CHECK1-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[TMP6]], align 8
634 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]])
635 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
636 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4]]
637 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
638 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
639 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
640 // CHECK1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
641 // CHECK1:       arraydestroy.body:
642 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP7]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
643 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
644 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
645 // CHECK1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
646 // CHECK1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
647 // CHECK1:       arraydestroy.done1:
648 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
649 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[RETVAL]], align 4
650 // CHECK1-NEXT:    ret i32 [[TMP8]]
651 //
652 //
653 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
654 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
655 // CHECK1-NEXT:  entry:
656 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
657 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
658 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
659 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
660 // CHECK1-NEXT:    [[TMP0:%.*]] = load volatile double, double* @g, align 8
661 // CHECK1-NEXT:    [[CONV:%.*]] = fptrunc double [[TMP0]] to float
662 // CHECK1-NEXT:    store float [[CONV]], float* [[F]], align 4
663 // CHECK1-NEXT:    ret void
664 //
665 //
666 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
667 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
668 // CHECK1-NEXT:  entry:
669 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
670 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
671 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
672 // CHECK1-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
673 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
674 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
675 // CHECK1-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
676 // CHECK1-NEXT:    [[CONV:%.*]] = fpext float [[TMP0]] to double
677 // CHECK1-NEXT:    [[TMP1:%.*]] = load volatile double, double* @g, align 8
678 // CHECK1-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], [[TMP1]]
679 // CHECK1-NEXT:    [[CONV2:%.*]] = fptrunc double [[ADD]] to float
680 // CHECK1-NEXT:    store float [[CONV2]], float* [[F]], align 4
681 // CHECK1-NEXT:    ret void
682 //
683 //
684 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
685 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
686 // CHECK1-NEXT:  entry:
687 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
688 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
689 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
690 // CHECK1-NEXT:    ret void
691 //
692 //
693 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
694 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
695 // CHECK1-NEXT:  entry:
696 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
697 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
698 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
699 // CHECK1-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
700 // CHECK1-NEXT:    ret void
701 //
702 //
703 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
704 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
705 // CHECK1-NEXT:  entry:
706 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
707 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
708 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
709 // CHECK1-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
710 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
711 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
712 // CHECK1-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
713 // CHECK1-NEXT:    ret void
714 //
715 //
716 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
717 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias [[__CONTEXT:%.*]]) #[[ATTR3]] {
718 // CHECK1-NEXT:  entry:
719 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
720 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
721 // CHECK1-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
722 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
723 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
724 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
725 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
726 // CHECK1-NEXT:    [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
727 // CHECK1-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
728 // CHECK1-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
729 // CHECK1-NEXT:    [[VAR1:%.*]] = alloca [[STRUCT_S_0]], align 4
730 // CHECK1-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
731 // CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
732 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S_0]], align 4
733 // CHECK1-NEXT:    [[REF_TMP8:%.*]] = alloca [[STRUCT_S_0]], align 4
734 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
735 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
736 // CHECK1-NEXT:    store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
737 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
738 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP0]], i32 0, i32 0
739 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
740 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 1
741 // CHECK1-NEXT:    [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP3]], align 8
742 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 2
743 // CHECK1-NEXT:    [[TMP6:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP5]], align 8
744 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 3
745 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 8
746 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 4
747 // CHECK1-NEXT:    [[TMP10:%.*]] = load [2 x i32]*, [2 x i32]** [[TMP9]], align 8
748 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 5
749 // CHECK1-NEXT:    [[TMP12:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[TMP11]], align 8
750 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
751 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
752 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
753 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
754 // CHECK1-NEXT:    store i32 0, i32* [[T_VAR]], align 4
755 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]])
756 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]])
757 // CHECK1-NEXT:    store i32 2147483647, i32* [[T_VAR1]], align 4
758 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
759 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
760 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP14]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
761 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
762 // CHECK1-NEXT:    [[TMP16:%.*]] = icmp slt i32 [[TMP15]], 1
763 // CHECK1-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP15]], i32 1
764 // CHECK1-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
765 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
766 // CHECK1-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
767 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
768 // CHECK1:       omp.inner.for.cond:
769 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
770 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
771 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
772 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
773 // CHECK1:       omp.inner.for.body:
774 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
775 // CHECK1-NEXT:    switch i32 [[TMP21]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
776 // CHECK1-NEXT:    i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
777 // CHECK1-NEXT:    i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
778 // CHECK1-NEXT:    ]
779 // CHECK1:       .omp.sections.case:
780 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[T_VAR]], align 4
781 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP10]], i64 0, i64 0
782 // CHECK1-NEXT:    store i32 [[TMP22]], i32* [[ARRAYIDX]], align 4
783 // CHECK1-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
784 // CHECK1:       .omp.sections.case1:
785 // CHECK1-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP12]], i64 0, i64 0
786 // CHECK1-NEXT:    [[TMP23:%.*]] = bitcast %struct.S.0* [[ARRAYIDX2]] to i8*
787 // CHECK1-NEXT:    [[TMP24:%.*]] = bitcast %struct.S.0* [[VAR]] to i8*
788 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP23]], i8* align 4 [[TMP24]], i64 4, i1 false)
789 // CHECK1-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
790 // CHECK1:       .omp.sections.exit:
791 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
792 // CHECK1:       omp.inner.for.inc:
793 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
794 // CHECK1-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP25]], 1
795 // CHECK1-NEXT:    store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
796 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
797 // CHECK1:       omp.inner.for.end:
798 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP14]])
799 // CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
800 // CHECK1-NEXT:    [[TMP27:%.*]] = bitcast i32* [[T_VAR]] to i8*
801 // CHECK1-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
802 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
803 // CHECK1-NEXT:    [[TMP29:%.*]] = bitcast %struct.S.0* [[VAR]] to i8*
804 // CHECK1-NEXT:    store i8* [[TMP29]], i8** [[TMP28]], align 8
805 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
806 // CHECK1-NEXT:    [[TMP31:%.*]] = bitcast %struct.S.0* [[VAR1]] to i8*
807 // CHECK1-NEXT:    store i8* [[TMP31]], i8** [[TMP30]], align 8
808 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
809 // CHECK1-NEXT:    [[TMP33:%.*]] = bitcast i32* [[T_VAR1]] to i8*
810 // CHECK1-NEXT:    store i8* [[TMP33]], i8** [[TMP32]], align 8
811 // CHECK1-NEXT:    [[TMP34:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
812 // CHECK1-NEXT:    [[TMP35:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP14]], i32 4, i64 32, i8* [[TMP34]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
813 // CHECK1-NEXT:    switch i32 [[TMP35]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
814 // CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
815 // CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
816 // CHECK1-NEXT:    ]
817 // CHECK1:       .omp.reduction.case1:
818 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[TMP2]], align 4
819 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[T_VAR]], align 4
820 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
821 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP2]], align 4
822 // CHECK1-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP4]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]])
823 // CHECK1-NEXT:    [[TMP38:%.*]] = bitcast %struct.S.0* [[TMP4]] to i8*
824 // CHECK1-NEXT:    [[TMP39:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
825 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP38]], i8* align 4 [[TMP39]], i64 4, i1 false)
826 // CHECK1-NEXT:    [[CALL3:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP6]])
827 // CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[CALL3]], 0
828 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
829 // CHECK1:       land.rhs:
830 // CHECK1-NEXT:    [[CALL4:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]])
831 // CHECK1-NEXT:    [[TOBOOL5:%.*]] = icmp ne i32 [[CALL4]], 0
832 // CHECK1-NEXT:    br label [[LAND_END]]
833 // CHECK1:       land.end:
834 // CHECK1-NEXT:    [[TMP40:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL5]], [[LAND_RHS]] ]
835 // CHECK1-NEXT:    [[CONV:%.*]] = zext i1 [[TMP40]] to i32
836 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
837 // CHECK1-NEXT:    [[TMP41:%.*]] = bitcast %struct.S.0* [[TMP6]] to i8*
838 // CHECK1-NEXT:    [[TMP42:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
839 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP41]], i8* align 4 [[TMP42]], i64 4, i1 false)
840 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
841 // CHECK1-NEXT:    [[TMP43:%.*]] = load i32, i32* [[TMP8]], align 4
842 // CHECK1-NEXT:    [[TMP44:%.*]] = load i32, i32* [[T_VAR1]], align 4
843 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP43]], [[TMP44]]
844 // CHECK1-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
845 // CHECK1:       cond.true:
846 // CHECK1-NEXT:    [[TMP45:%.*]] = load i32, i32* [[TMP8]], align 4
847 // CHECK1-NEXT:    br label [[COND_END:%.*]]
848 // CHECK1:       cond.false:
849 // CHECK1-NEXT:    [[TMP46:%.*]] = load i32, i32* [[T_VAR1]], align 4
850 // CHECK1-NEXT:    br label [[COND_END]]
851 // CHECK1:       cond.end:
852 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP45]], [[COND_TRUE]] ], [ [[TMP46]], [[COND_FALSE]] ]
853 // CHECK1-NEXT:    store i32 [[COND]], i32* [[TMP8]], align 4
854 // CHECK1-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.reduction.var)
855 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
856 // CHECK1:       .omp.reduction.case2:
857 // CHECK1-NEXT:    [[TMP47:%.*]] = load i32, i32* [[T_VAR]], align 4
858 // CHECK1-NEXT:    [[TMP48:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP47]] monotonic, align 4
859 // CHECK1-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
860 // CHECK1-NEXT:    [[CALL7:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP4]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]])
861 // CHECK1-NEXT:    [[TMP49:%.*]] = bitcast %struct.S.0* [[TMP4]] to i8*
862 // CHECK1-NEXT:    [[TMP50:%.*]] = bitcast %struct.S.0* [[CALL7]] to i8*
863 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP49]], i8* align 4 [[TMP50]], i64 4, i1 false)
864 // CHECK1-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
865 // CHECK1-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
866 // CHECK1-NEXT:    [[CALL9:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP6]])
867 // CHECK1-NEXT:    [[TOBOOL10:%.*]] = icmp ne i32 [[CALL9]], 0
868 // CHECK1-NEXT:    br i1 [[TOBOOL10]], label [[LAND_RHS11:%.*]], label [[LAND_END14:%.*]]
869 // CHECK1:       land.rhs11:
870 // CHECK1-NEXT:    [[CALL12:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]])
871 // CHECK1-NEXT:    [[TOBOOL13:%.*]] = icmp ne i32 [[CALL12]], 0
872 // CHECK1-NEXT:    br label [[LAND_END14]]
873 // CHECK1:       land.end14:
874 // CHECK1-NEXT:    [[TMP51:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE2]] ], [ [[TOBOOL13]], [[LAND_RHS11]] ]
875 // CHECK1-NEXT:    [[CONV15:%.*]] = zext i1 [[TMP51]] to i32
876 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP8]], i32 [[CONV15]])
877 // CHECK1-NEXT:    [[TMP52:%.*]] = bitcast %struct.S.0* [[TMP6]] to i8*
878 // CHECK1-NEXT:    [[TMP53:%.*]] = bitcast %struct.S.0* [[REF_TMP8]] to i8*
879 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP52]], i8* align 4 [[TMP53]], i64 4, i1 false)
880 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP8]]) #[[ATTR4]]
881 // CHECK1-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
882 // CHECK1-NEXT:    [[TMP54:%.*]] = load i32, i32* [[T_VAR1]], align 4
883 // CHECK1-NEXT:    [[TMP55:%.*]] = atomicrmw min i32* [[TMP8]], i32 [[TMP54]] monotonic, align 4
884 // CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
885 // CHECK1:       .omp.reduction.default:
886 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4]]
887 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
888 // CHECK1-NEXT:    ret void
889 //
890 //
891 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
892 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
893 // CHECK1-NEXT:  entry:
894 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
895 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
896 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
897 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
898 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
899 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
900 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
901 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
902 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
903 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
904 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
905 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
906 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
907 // CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
908 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
909 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
910 // CHECK1-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
911 // CHECK1-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S.0*
912 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
913 // CHECK1-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
914 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S.0*
915 // CHECK1-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
916 // CHECK1-NEXT:    [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
917 // CHECK1-NEXT:    [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S.0*
918 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
919 // CHECK1-NEXT:    [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
920 // CHECK1-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S.0*
921 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
922 // CHECK1-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
923 // CHECK1-NEXT:    [[TMP26:%.*]] = bitcast i8* [[TMP25]] to i32*
924 // CHECK1-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
925 // CHECK1-NEXT:    [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
926 // CHECK1-NEXT:    [[TMP29:%.*]] = bitcast i8* [[TMP28]] to i32*
927 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP11]], align 4
928 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[TMP8]], align 4
929 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
930 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
931 // CHECK1-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP14]])
932 // CHECK1-NEXT:    [[TMP32:%.*]] = bitcast %struct.S.0* [[TMP17]] to i8*
933 // CHECK1-NEXT:    [[TMP33:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
934 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
935 // CHECK1-NEXT:    [[CALL2:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP23]])
936 // CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[CALL2]], 0
937 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
938 // CHECK1:       land.rhs:
939 // CHECK1-NEXT:    [[CALL3:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP20]])
940 // CHECK1-NEXT:    [[TOBOOL4:%.*]] = icmp ne i32 [[CALL3]], 0
941 // CHECK1-NEXT:    br label [[LAND_END]]
942 // CHECK1:       land.end:
943 // CHECK1-NEXT:    [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
944 // CHECK1-NEXT:    [[CONV:%.*]] = zext i1 [[TMP34]] to i32
945 // CHECK1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
946 // CHECK1-NEXT:    [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP23]] to i8*
947 // CHECK1-NEXT:    [[TMP36:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
948 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
949 // CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
950 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP29]], align 4
951 // CHECK1-NEXT:    [[TMP38:%.*]] = load i32, i32* [[TMP26]], align 4
952 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP37]], [[TMP38]]
953 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
954 // CHECK1:       cond.true:
955 // CHECK1-NEXT:    [[TMP39:%.*]] = load i32, i32* [[TMP29]], align 4
956 // CHECK1-NEXT:    br label [[COND_END:%.*]]
957 // CHECK1:       cond.false:
958 // CHECK1-NEXT:    [[TMP40:%.*]] = load i32, i32* [[TMP26]], align 4
959 // CHECK1-NEXT:    br label [[COND_END]]
960 // CHECK1:       cond.end:
961 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
962 // CHECK1-NEXT:    store i32 [[COND]], i32* [[TMP29]], align 4
963 // CHECK1-NEXT:    ret void
964 //
965 //
966 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEanERKS0_
967 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR6]] align 2 {
968 // CHECK1-NEXT:  entry:
969 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
970 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S.0*, align 8
971 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
972 // CHECK1-NEXT:    store %struct.S.0* [[TMP0]], %struct.S.0** [[DOTADDR]], align 8
973 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
974 // CHECK1-NEXT:    ret %struct.S.0* [[THIS1]]
975 //
976 //
977 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEcviEv
978 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR6]] align 2 {
979 // CHECK1-NEXT:  entry:
980 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
981 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
982 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
983 // CHECK1-NEXT:    ret i32 0
984 //
985 //
986 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
987 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
988 // CHECK1-NEXT:  entry:
989 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
990 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
991 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
992 // CHECK1-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
993 // CHECK1-NEXT:    ret void
994 //
995 //
996 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
997 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
998 // CHECK1-NEXT:  entry:
999 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1000 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1001 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1002 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1003 // CHECK1-NEXT:    [[TMP0:%.*]] = load volatile double, double* @g, align 8
1004 // CHECK1-NEXT:    [[CONV:%.*]] = fptosi double [[TMP0]] to i32
1005 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[F]], align 4
1006 // CHECK1-NEXT:    ret void
1007 //
1008 //
1009 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
1010 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1011 // CHECK1-NEXT:  entry:
1012 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1013 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1014 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1015 // CHECK1-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1016 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1017 // CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1018 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1019 // CHECK1-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to double
1020 // CHECK1-NEXT:    [[TMP1:%.*]] = load volatile double, double* @g, align 8
1021 // CHECK1-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], [[TMP1]]
1022 // CHECK1-NEXT:    [[CONV2:%.*]] = fptosi double [[ADD]] to i32
1023 // CHECK1-NEXT:    store i32 [[CONV2]], i32* [[F]], align 4
1024 // CHECK1-NEXT:    ret void
1025 //
1026 //
1027 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
1028 // CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1029 // CHECK1-NEXT:  entry:
1030 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1031 // CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1032 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1033 // CHECK1-NEXT:    ret void
1034 //
1035 //
1036 // CHECK2-LABEL: define {{[^@]+}}@main
1037 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
1038 // CHECK2-NEXT:  entry:
1039 // CHECK2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1040 // CHECK2-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
1041 // CHECK2-NEXT:    [[T_VAR:%.*]] = alloca float, align 4
1042 // CHECK2-NEXT:    [[T_VAR1:%.*]] = alloca float, align 4
1043 // CHECK2-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1044 // CHECK2-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
1045 // CHECK2-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S]], align 4
1046 // CHECK2-NEXT:    [[VAR1:%.*]] = alloca [[STRUCT_S]], align 4
1047 // CHECK2-NEXT:    [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
1048 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1049 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
1050 // CHECK2-NEXT:    store float 0.000000e+00, float* [[T_VAR]], align 4
1051 // CHECK2-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
1052 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
1053 // CHECK2-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
1054 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
1055 // CHECK2-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
1056 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
1057 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]], float 3.000000e+00)
1058 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
1059 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
1060 // CHECK2-NEXT:    store float* [[T_VAR]], float** [[TMP1]], align 8
1061 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
1062 // CHECK2-NEXT:    store %struct.S* [[VAR]], %struct.S** [[TMP2]], align 8
1063 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
1064 // CHECK2-NEXT:    store %struct.S* [[VAR1]], %struct.S** [[TMP3]], align 8
1065 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 3
1066 // CHECK2-NEXT:    store float* [[T_VAR1]], float** [[TMP4]], align 8
1067 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 4
1068 // CHECK2-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP5]], align 8
1069 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 5
1070 // CHECK2-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[TMP6]], align 8
1071 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
1072 // CHECK2-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
1073 // CHECK2-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
1074 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4:[0-9]+]]
1075 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
1076 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
1077 // CHECK2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
1078 // CHECK2-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1079 // CHECK2:       arraydestroy.body:
1080 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP7]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1081 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1082 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1083 // CHECK2-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1084 // CHECK2-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1085 // CHECK2:       arraydestroy.done1:
1086 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
1087 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[RETVAL]], align 4
1088 // CHECK2-NEXT:    ret i32 [[TMP8]]
1089 //
1090 //
1091 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
1092 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
1093 // CHECK2-NEXT:  entry:
1094 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1095 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1096 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1097 // CHECK2-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
1098 // CHECK2-NEXT:    ret void
1099 //
1100 //
1101 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
1102 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1103 // CHECK2-NEXT:  entry:
1104 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1105 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
1106 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1107 // CHECK2-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
1108 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1109 // CHECK2-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1110 // CHECK2-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
1111 // CHECK2-NEXT:    ret void
1112 //
1113 //
1114 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
1115 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] {
1116 // CHECK2-NEXT:  entry:
1117 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1118 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1119 // CHECK2-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
1120 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
1121 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
1122 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
1123 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
1124 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
1125 // CHECK2-NEXT:    [[T_VAR:%.*]] = alloca float, align 4
1126 // CHECK2-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S:%.*]], align 4
1127 // CHECK2-NEXT:    [[VAR1:%.*]] = alloca [[STRUCT_S]], align 4
1128 // CHECK2-NEXT:    [[T_VAR1:%.*]] = alloca float, align 4
1129 // CHECK2-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
1130 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S]], align 4
1131 // CHECK2-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4
1132 // CHECK2-NEXT:    [[TMP:%.*]] = alloca float, align 4
1133 // CHECK2-NEXT:    [[REF_TMP12:%.*]] = alloca [[STRUCT_S]], align 4
1134 // CHECK2-NEXT:    [[ATOMIC_TEMP22:%.*]] = alloca float, align 4
1135 // CHECK2-NEXT:    [[_TMP23:%.*]] = alloca float, align 4
1136 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1137 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1138 // CHECK2-NEXT:    store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
1139 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
1140 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
1141 // CHECK2-NEXT:    [[TMP2:%.*]] = load float*, float** [[TMP1]], align 8
1142 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
1143 // CHECK2-NEXT:    [[TMP4:%.*]] = load %struct.S*, %struct.S** [[TMP3]], align 8
1144 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 2
1145 // CHECK2-NEXT:    [[TMP6:%.*]] = load %struct.S*, %struct.S** [[TMP5]], align 8
1146 // CHECK2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 3
1147 // CHECK2-NEXT:    [[TMP8:%.*]] = load float*, float** [[TMP7]], align 8
1148 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 4
1149 // CHECK2-NEXT:    [[TMP10:%.*]] = load [2 x i32]*, [2 x i32]** [[TMP9]], align 8
1150 // CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 5
1151 // CHECK2-NEXT:    [[TMP12:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[TMP11]], align 8
1152 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
1153 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1154 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
1155 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
1156 // CHECK2-NEXT:    store float 0.000000e+00, float* [[T_VAR]], align 4
1157 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]])
1158 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
1159 // CHECK2-NEXT:    store float 0x47EFFFFFE0000000, float* [[T_VAR1]], align 4
1160 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1161 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
1162 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP14]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
1163 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1164 // CHECK2-NEXT:    [[TMP16:%.*]] = icmp slt i32 [[TMP15]], 0
1165 // CHECK2-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP15]], i32 0
1166 // CHECK2-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
1167 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
1168 // CHECK2-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
1169 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1170 // CHECK2:       omp.inner.for.cond:
1171 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1172 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1173 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
1174 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1175 // CHECK2:       omp.inner.for.body:
1176 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1177 // CHECK2-NEXT:    switch i32 [[TMP21]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
1178 // CHECK2-NEXT:    i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
1179 // CHECK2-NEXT:    ]
1180 // CHECK2:       .omp.sections.case:
1181 // CHECK2-NEXT:    [[TMP22:%.*]] = load float, float* [[T_VAR]], align 4
1182 // CHECK2-NEXT:    [[CONV:%.*]] = fptosi float [[TMP22]] to i32
1183 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP10]], i64 0, i64 0
1184 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[ARRAYIDX]], align 4
1185 // CHECK2-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP12]], i64 0, i64 0
1186 // CHECK2-NEXT:    [[TMP23:%.*]] = bitcast %struct.S* [[ARRAYIDX1]] to i8*
1187 // CHECK2-NEXT:    [[TMP24:%.*]] = bitcast %struct.S* [[VAR]] to i8*
1188 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP23]], i8* align 4 [[TMP24]], i64 4, i1 false)
1189 // CHECK2-NEXT:    [[TMP25:%.*]] = load float, float* [[T_VAR1]], align 4
1190 // CHECK2-NEXT:    [[CONV2:%.*]] = fptosi float [[TMP25]] to i32
1191 // CHECK2-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP10]], i64 0, i64 1
1192 // CHECK2-NEXT:    store i32 [[CONV2]], i32* [[ARRAYIDX3]], align 4
1193 // CHECK2-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP12]], i64 0, i64 1
1194 // CHECK2-NEXT:    [[TMP26:%.*]] = bitcast %struct.S* [[ARRAYIDX4]] to i8*
1195 // CHECK2-NEXT:    [[TMP27:%.*]] = bitcast %struct.S* [[VAR1]] to i8*
1196 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP26]], i8* align 4 [[TMP27]], i64 4, i1 false)
1197 // CHECK2-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
1198 // CHECK2:       .omp.sections.exit:
1199 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1200 // CHECK2:       omp.inner.for.inc:
1201 // CHECK2-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1202 // CHECK2-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP28]], 1
1203 // CHECK2-NEXT:    store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
1204 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
1205 // CHECK2:       omp.inner.for.end:
1206 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP14]])
1207 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
1208 // CHECK2-NEXT:    [[TMP30:%.*]] = bitcast float* [[T_VAR]] to i8*
1209 // CHECK2-NEXT:    store i8* [[TMP30]], i8** [[TMP29]], align 8
1210 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
1211 // CHECK2-NEXT:    [[TMP32:%.*]] = bitcast %struct.S* [[VAR]] to i8*
1212 // CHECK2-NEXT:    store i8* [[TMP32]], i8** [[TMP31]], align 8
1213 // CHECK2-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
1214 // CHECK2-NEXT:    [[TMP34:%.*]] = bitcast %struct.S* [[VAR1]] to i8*
1215 // CHECK2-NEXT:    store i8* [[TMP34]], i8** [[TMP33]], align 8
1216 // CHECK2-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
1217 // CHECK2-NEXT:    [[TMP36:%.*]] = bitcast float* [[T_VAR1]] to i8*
1218 // CHECK2-NEXT:    store i8* [[TMP36]], i8** [[TMP35]], align 8
1219 // CHECK2-NEXT:    [[TMP37:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1220 // CHECK2-NEXT:    [[TMP38:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP14]], i32 4, i64 32, i8* [[TMP37]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
1221 // CHECK2-NEXT:    switch i32 [[TMP38]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1222 // CHECK2-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1223 // CHECK2-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1224 // CHECK2-NEXT:    ]
1225 // CHECK2:       .omp.reduction.case1:
1226 // CHECK2-NEXT:    [[TMP39:%.*]] = load float, float* [[TMP2]], align 4
1227 // CHECK2-NEXT:    [[TMP40:%.*]] = load float, float* [[T_VAR]], align 4
1228 // CHECK2-NEXT:    [[ADD:%.*]] = fadd float [[TMP39]], [[TMP40]]
1229 // CHECK2-NEXT:    store float [[ADD]], float* [[TMP2]], align 4
1230 // CHECK2-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP4]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR]])
1231 // CHECK2-NEXT:    [[TMP41:%.*]] = bitcast %struct.S* [[TMP4]] to i8*
1232 // CHECK2-NEXT:    [[TMP42:%.*]] = bitcast %struct.S* [[CALL]] to i8*
1233 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP41]], i8* align 4 [[TMP42]], i64 4, i1 false)
1234 // CHECK2-NEXT:    [[CALL5:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP6]])
1235 // CHECK2-NEXT:    [[TOBOOL:%.*]] = fcmp une float [[CALL5]], 0.000000e+00
1236 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
1237 // CHECK2:       land.rhs:
1238 // CHECK2-NEXT:    [[CALL6:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
1239 // CHECK2-NEXT:    [[TOBOOL7:%.*]] = fcmp une float [[CALL6]], 0.000000e+00
1240 // CHECK2-NEXT:    br label [[LAND_END]]
1241 // CHECK2:       land.end:
1242 // CHECK2-NEXT:    [[TMP43:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL7]], [[LAND_RHS]] ]
1243 // CHECK2-NEXT:    [[CONV8:%.*]] = uitofp i1 [[TMP43]] to float
1244 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV8]])
1245 // CHECK2-NEXT:    [[TMP44:%.*]] = bitcast %struct.S* [[TMP6]] to i8*
1246 // CHECK2-NEXT:    [[TMP45:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
1247 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP44]], i8* align 4 [[TMP45]], i64 4, i1 false)
1248 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
1249 // CHECK2-NEXT:    [[TMP46:%.*]] = load float, float* [[TMP8]], align 4
1250 // CHECK2-NEXT:    [[TMP47:%.*]] = load float, float* [[T_VAR1]], align 4
1251 // CHECK2-NEXT:    [[CMP9:%.*]] = fcmp olt float [[TMP46]], [[TMP47]]
1252 // CHECK2-NEXT:    br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1253 // CHECK2:       cond.true:
1254 // CHECK2-NEXT:    [[TMP48:%.*]] = load float, float* [[TMP8]], align 4
1255 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1256 // CHECK2:       cond.false:
1257 // CHECK2-NEXT:    [[TMP49:%.*]] = load float, float* [[T_VAR1]], align 4
1258 // CHECK2-NEXT:    br label [[COND_END]]
1259 // CHECK2:       cond.end:
1260 // CHECK2-NEXT:    [[COND:%.*]] = phi float [ [[TMP48]], [[COND_TRUE]] ], [ [[TMP49]], [[COND_FALSE]] ]
1261 // CHECK2-NEXT:    store float [[COND]], float* [[TMP8]], align 4
1262 // CHECK2-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1263 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1264 // CHECK2:       .omp.reduction.case2:
1265 // CHECK2-NEXT:    [[TMP50:%.*]] = load float, float* [[T_VAR]], align 4
1266 // CHECK2-NEXT:    [[TMP51:%.*]] = bitcast float* [[TMP2]] to i32*
1267 // CHECK2-NEXT:    [[ATOMIC_LOAD:%.*]] = load atomic i32, i32* [[TMP51]] monotonic, align 4
1268 // CHECK2-NEXT:    br label [[ATOMIC_CONT:%.*]]
1269 // CHECK2:       atomic_cont:
1270 // CHECK2-NEXT:    [[TMP52:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP60:%.*]], [[ATOMIC_CONT]] ]
1271 // CHECK2-NEXT:    [[TMP53:%.*]] = bitcast float* [[ATOMIC_TEMP]] to i32*
1272 // CHECK2-NEXT:    [[TMP54:%.*]] = bitcast i32 [[TMP52]] to float
1273 // CHECK2-NEXT:    store float [[TMP54]], float* [[TMP]], align 4
1274 // CHECK2-NEXT:    [[TMP55:%.*]] = load float, float* [[TMP]], align 4
1275 // CHECK2-NEXT:    [[TMP56:%.*]] = load float, float* [[T_VAR]], align 4
1276 // CHECK2-NEXT:    [[ADD10:%.*]] = fadd float [[TMP55]], [[TMP56]]
1277 // CHECK2-NEXT:    store float [[ADD10]], float* [[ATOMIC_TEMP]], align 4
1278 // CHECK2-NEXT:    [[TMP57:%.*]] = load i32, i32* [[TMP53]], align 4
1279 // CHECK2-NEXT:    [[TMP58:%.*]] = bitcast float* [[TMP2]] to i32*
1280 // CHECK2-NEXT:    [[TMP59:%.*]] = cmpxchg i32* [[TMP58]], i32 [[TMP52]], i32 [[TMP57]] monotonic monotonic, align 4
1281 // CHECK2-NEXT:    [[TMP60]] = extractvalue { i32, i1 } [[TMP59]], 0
1282 // CHECK2-NEXT:    [[TMP61:%.*]] = extractvalue { i32, i1 } [[TMP59]], 1
1283 // CHECK2-NEXT:    br i1 [[TMP61]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
1284 // CHECK2:       atomic_exit:
1285 // CHECK2-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1286 // CHECK2-NEXT:    [[CALL11:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP4]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR]])
1287 // CHECK2-NEXT:    [[TMP62:%.*]] = bitcast %struct.S* [[TMP4]] to i8*
1288 // CHECK2-NEXT:    [[TMP63:%.*]] = bitcast %struct.S* [[CALL11]] to i8*
1289 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP62]], i8* align 4 [[TMP63]], i64 4, i1 false)
1290 // CHECK2-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1291 // CHECK2-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1292 // CHECK2-NEXT:    [[CALL13:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP6]])
1293 // CHECK2-NEXT:    [[TOBOOL14:%.*]] = fcmp une float [[CALL13]], 0.000000e+00
1294 // CHECK2-NEXT:    br i1 [[TOBOOL14]], label [[LAND_RHS15:%.*]], label [[LAND_END18:%.*]]
1295 // CHECK2:       land.rhs15:
1296 // CHECK2-NEXT:    [[CALL16:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
1297 // CHECK2-NEXT:    [[TOBOOL17:%.*]] = fcmp une float [[CALL16]], 0.000000e+00
1298 // CHECK2-NEXT:    br label [[LAND_END18]]
1299 // CHECK2:       land.end18:
1300 // CHECK2-NEXT:    [[TMP64:%.*]] = phi i1 [ false, [[ATOMIC_EXIT]] ], [ [[TOBOOL17]], [[LAND_RHS15]] ]
1301 // CHECK2-NEXT:    [[CONV19:%.*]] = uitofp i1 [[TMP64]] to float
1302 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP12]], float [[CONV19]])
1303 // CHECK2-NEXT:    [[TMP65:%.*]] = bitcast %struct.S* [[TMP6]] to i8*
1304 // CHECK2-NEXT:    [[TMP66:%.*]] = bitcast %struct.S* [[REF_TMP12]] to i8*
1305 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP65]], i8* align 4 [[TMP66]], i64 4, i1 false)
1306 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP12]]) #[[ATTR4]]
1307 // CHECK2-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1308 // CHECK2-NEXT:    [[TMP67:%.*]] = load float, float* [[T_VAR1]], align 4
1309 // CHECK2-NEXT:    [[TMP68:%.*]] = bitcast float* [[TMP8]] to i32*
1310 // CHECK2-NEXT:    [[ATOMIC_LOAD20:%.*]] = load atomic i32, i32* [[TMP68]] monotonic, align 4
1311 // CHECK2-NEXT:    br label [[ATOMIC_CONT21:%.*]]
1312 // CHECK2:       atomic_cont21:
1313 // CHECK2-NEXT:    [[TMP69:%.*]] = phi i32 [ [[ATOMIC_LOAD20]], [[LAND_END18]] ], [ [[TMP79:%.*]], [[COND_END27:%.*]] ]
1314 // CHECK2-NEXT:    [[TMP70:%.*]] = bitcast float* [[ATOMIC_TEMP22]] to i32*
1315 // CHECK2-NEXT:    [[TMP71:%.*]] = bitcast i32 [[TMP69]] to float
1316 // CHECK2-NEXT:    store float [[TMP71]], float* [[_TMP23]], align 4
1317 // CHECK2-NEXT:    [[TMP72:%.*]] = load float, float* [[_TMP23]], align 4
1318 // CHECK2-NEXT:    [[TMP73:%.*]] = load float, float* [[T_VAR1]], align 4
1319 // CHECK2-NEXT:    [[CMP24:%.*]] = fcmp olt float [[TMP72]], [[TMP73]]
1320 // CHECK2-NEXT:    br i1 [[CMP24]], label [[COND_TRUE25:%.*]], label [[COND_FALSE26:%.*]]
1321 // CHECK2:       cond.true25:
1322 // CHECK2-NEXT:    [[TMP74:%.*]] = load float, float* [[_TMP23]], align 4
1323 // CHECK2-NEXT:    br label [[COND_END27]]
1324 // CHECK2:       cond.false26:
1325 // CHECK2-NEXT:    [[TMP75:%.*]] = load float, float* [[T_VAR1]], align 4
1326 // CHECK2-NEXT:    br label [[COND_END27]]
1327 // CHECK2:       cond.end27:
1328 // CHECK2-NEXT:    [[COND28:%.*]] = phi float [ [[TMP74]], [[COND_TRUE25]] ], [ [[TMP75]], [[COND_FALSE26]] ]
1329 // CHECK2-NEXT:    store float [[COND28]], float* [[ATOMIC_TEMP22]], align 4
1330 // CHECK2-NEXT:    [[TMP76:%.*]] = load i32, i32* [[TMP70]], align 4
1331 // CHECK2-NEXT:    [[TMP77:%.*]] = bitcast float* [[TMP8]] to i32*
1332 // CHECK2-NEXT:    [[TMP78:%.*]] = cmpxchg i32* [[TMP77]], i32 [[TMP69]], i32 [[TMP76]] monotonic monotonic, align 4
1333 // CHECK2-NEXT:    [[TMP79]] = extractvalue { i32, i1 } [[TMP78]], 0
1334 // CHECK2-NEXT:    [[TMP80:%.*]] = extractvalue { i32, i1 } [[TMP78]], 1
1335 // CHECK2-NEXT:    br i1 [[TMP80]], label [[ATOMIC_EXIT29:%.*]], label [[ATOMIC_CONT21]]
1336 // CHECK2:       atomic_exit29:
1337 // CHECK2-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1338 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1339 // CHECK2:       .omp.reduction.default:
1340 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4]]
1341 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
1342 // CHECK2-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP14]])
1343 // CHECK2-NEXT:    ret void
1344 //
1345 //
1346 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
1347 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
1348 // CHECK2-NEXT:  entry:
1349 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1350 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1351 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 4
1352 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1353 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1354 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1355 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
1356 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1357 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
1358 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
1359 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1360 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to float*
1361 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
1362 // CHECK2-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1363 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to float*
1364 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
1365 // CHECK2-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
1366 // CHECK2-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S*
1367 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
1368 // CHECK2-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
1369 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S*
1370 // CHECK2-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
1371 // CHECK2-NEXT:    [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
1372 // CHECK2-NEXT:    [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S*
1373 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
1374 // CHECK2-NEXT:    [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
1375 // CHECK2-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S*
1376 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
1377 // CHECK2-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
1378 // CHECK2-NEXT:    [[TMP26:%.*]] = bitcast i8* [[TMP25]] to float*
1379 // CHECK2-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
1380 // CHECK2-NEXT:    [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
1381 // CHECK2-NEXT:    [[TMP29:%.*]] = bitcast i8* [[TMP28]] to float*
1382 // CHECK2-NEXT:    [[TMP30:%.*]] = load float, float* [[TMP11]], align 4
1383 // CHECK2-NEXT:    [[TMP31:%.*]] = load float, float* [[TMP8]], align 4
1384 // CHECK2-NEXT:    [[ADD:%.*]] = fadd float [[TMP30]], [[TMP31]]
1385 // CHECK2-NEXT:    store float [[ADD]], float* [[TMP11]], align 4
1386 // CHECK2-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP14]])
1387 // CHECK2-NEXT:    [[TMP32:%.*]] = bitcast %struct.S* [[TMP17]] to i8*
1388 // CHECK2-NEXT:    [[TMP33:%.*]] = bitcast %struct.S* [[CALL]] to i8*
1389 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
1390 // CHECK2-NEXT:    [[CALL2:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP23]])
1391 // CHECK2-NEXT:    [[TOBOOL:%.*]] = fcmp une float [[CALL2]], 0.000000e+00
1392 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
1393 // CHECK2:       land.rhs:
1394 // CHECK2-NEXT:    [[CALL3:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP20]])
1395 // CHECK2-NEXT:    [[TOBOOL4:%.*]] = fcmp une float [[CALL3]], 0.000000e+00
1396 // CHECK2-NEXT:    br label [[LAND_END]]
1397 // CHECK2:       land.end:
1398 // CHECK2-NEXT:    [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
1399 // CHECK2-NEXT:    [[CONV:%.*]] = uitofp i1 [[TMP34]] to float
1400 // CHECK2-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV]])
1401 // CHECK2-NEXT:    [[TMP35:%.*]] = bitcast %struct.S* [[TMP23]] to i8*
1402 // CHECK2-NEXT:    [[TMP36:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
1403 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
1404 // CHECK2-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
1405 // CHECK2-NEXT:    [[TMP37:%.*]] = load float, float* [[TMP29]], align 4
1406 // CHECK2-NEXT:    [[TMP38:%.*]] = load float, float* [[TMP26]], align 4
1407 // CHECK2-NEXT:    [[CMP:%.*]] = fcmp olt float [[TMP37]], [[TMP38]]
1408 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1409 // CHECK2:       cond.true:
1410 // CHECK2-NEXT:    [[TMP39:%.*]] = load float, float* [[TMP29]], align 4
1411 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1412 // CHECK2:       cond.false:
1413 // CHECK2-NEXT:    [[TMP40:%.*]] = load float, float* [[TMP26]], align 4
1414 // CHECK2-NEXT:    br label [[COND_END]]
1415 // CHECK2:       cond.end:
1416 // CHECK2-NEXT:    [[COND:%.*]] = phi float [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
1417 // CHECK2-NEXT:    store float [[COND]], float* [[TMP29]], align 4
1418 // CHECK2-NEXT:    ret void
1419 //
1420 //
1421 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEanERKS0_
1422 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] align 2 {
1423 // CHECK2-NEXT:  entry:
1424 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1425 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S*, align 8
1426 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1427 // CHECK2-NEXT:    store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
1428 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1429 // CHECK2-NEXT:    ret %struct.S* [[THIS1]]
1430 //
1431 //
1432 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEcvfEv
1433 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR6]] align 2 {
1434 // CHECK2-NEXT:  entry:
1435 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1436 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1437 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1438 // CHECK2-NEXT:    ret float 0.000000e+00
1439 //
1440 //
1441 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
1442 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1443 // CHECK2-NEXT:  entry:
1444 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1445 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1446 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1447 // CHECK2-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
1448 // CHECK2-NEXT:    ret void
1449 //
1450 //
1451 // CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
1452 // CHECK2-SAME: () #[[ATTR6]] {
1453 // CHECK2-NEXT:  entry:
1454 // CHECK2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1455 // CHECK2-NEXT:    [[T:%.*]] = alloca i32, align 4
1456 // CHECK2-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1457 // CHECK2-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1458 // CHECK2-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
1459 // CHECK2-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1460 // CHECK2-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
1461 // CHECK2-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4
1462 // CHECK2-NEXT:    [[VAR1:%.*]] = alloca [[STRUCT_S_0]], align 4
1463 // CHECK2-NEXT:    [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
1464 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
1465 // CHECK2-NEXT:    store i32 0, i32* [[T_VAR]], align 4
1466 // CHECK2-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
1467 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
1468 // CHECK2-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
1469 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
1470 // CHECK2-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
1471 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
1472 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]], i32 3)
1473 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]])
1474 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
1475 // CHECK2-NEXT:    store i32* [[T_VAR]], i32** [[TMP1]], align 8
1476 // CHECK2-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
1477 // CHECK2-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[TMP2]], align 8
1478 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
1479 // CHECK2-NEXT:    store %struct.S.0* [[VAR1]], %struct.S.0** [[TMP3]], align 8
1480 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 3
1481 // CHECK2-NEXT:    store i32* [[T_VAR1]], i32** [[TMP4]], align 8
1482 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 4
1483 // CHECK2-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[TMP5]], align 8
1484 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 5
1485 // CHECK2-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[TMP6]], align 8
1486 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]])
1487 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1488 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4]]
1489 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
1490 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1491 // CHECK2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
1492 // CHECK2-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1493 // CHECK2:       arraydestroy.body:
1494 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP7]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1495 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1496 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1497 // CHECK2-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1498 // CHECK2-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1499 // CHECK2:       arraydestroy.done1:
1500 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
1501 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[RETVAL]], align 4
1502 // CHECK2-NEXT:    ret i32 [[TMP8]]
1503 //
1504 //
1505 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
1506 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1507 // CHECK2-NEXT:  entry:
1508 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1509 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1510 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1511 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1512 // CHECK2-NEXT:    [[TMP0:%.*]] = load volatile double, double* @g, align 8
1513 // CHECK2-NEXT:    [[CONV:%.*]] = fptrunc double [[TMP0]] to float
1514 // CHECK2-NEXT:    store float [[CONV]], float* [[F]], align 4
1515 // CHECK2-NEXT:    ret void
1516 //
1517 //
1518 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
1519 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1520 // CHECK2-NEXT:  entry:
1521 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1522 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
1523 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1524 // CHECK2-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
1525 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1526 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1527 // CHECK2-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1528 // CHECK2-NEXT:    [[CONV:%.*]] = fpext float [[TMP0]] to double
1529 // CHECK2-NEXT:    [[TMP1:%.*]] = load volatile double, double* @g, align 8
1530 // CHECK2-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], [[TMP1]]
1531 // CHECK2-NEXT:    [[CONV2:%.*]] = fptrunc double [[ADD]] to float
1532 // CHECK2-NEXT:    store float [[CONV2]], float* [[F]], align 4
1533 // CHECK2-NEXT:    ret void
1534 //
1535 //
1536 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
1537 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1538 // CHECK2-NEXT:  entry:
1539 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1540 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1541 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1542 // CHECK2-NEXT:    ret void
1543 //
1544 //
1545 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
1546 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1547 // CHECK2-NEXT:  entry:
1548 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1549 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1550 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1551 // CHECK2-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
1552 // CHECK2-NEXT:    ret void
1553 //
1554 //
1555 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
1556 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1557 // CHECK2-NEXT:  entry:
1558 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1559 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1560 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1561 // CHECK2-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1562 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1563 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1564 // CHECK2-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
1565 // CHECK2-NEXT:    ret void
1566 //
1567 //
1568 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
1569 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias [[__CONTEXT:%.*]]) #[[ATTR3]] {
1570 // CHECK2-NEXT:  entry:
1571 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1572 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1573 // CHECK2-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
1574 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
1575 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
1576 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
1577 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
1578 // CHECK2-NEXT:    [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
1579 // CHECK2-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1580 // CHECK2-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1581 // CHECK2-NEXT:    [[VAR1:%.*]] = alloca [[STRUCT_S_0]], align 4
1582 // CHECK2-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
1583 // CHECK2-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
1584 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S_0]], align 4
1585 // CHECK2-NEXT:    [[REF_TMP8:%.*]] = alloca [[STRUCT_S_0]], align 4
1586 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1587 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1588 // CHECK2-NEXT:    store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
1589 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
1590 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP0]], i32 0, i32 0
1591 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
1592 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 1
1593 // CHECK2-NEXT:    [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP3]], align 8
1594 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 2
1595 // CHECK2-NEXT:    [[TMP6:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP5]], align 8
1596 // CHECK2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 3
1597 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 8
1598 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 4
1599 // CHECK2-NEXT:    [[TMP10:%.*]] = load [2 x i32]*, [2 x i32]** [[TMP9]], align 8
1600 // CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 5
1601 // CHECK2-NEXT:    [[TMP12:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[TMP11]], align 8
1602 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
1603 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1604 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
1605 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
1606 // CHECK2-NEXT:    store i32 0, i32* [[T_VAR]], align 4
1607 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]])
1608 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]])
1609 // CHECK2-NEXT:    store i32 2147483647, i32* [[T_VAR1]], align 4
1610 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1611 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
1612 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP14]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
1613 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1614 // CHECK2-NEXT:    [[TMP16:%.*]] = icmp slt i32 [[TMP15]], 1
1615 // CHECK2-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP15]], i32 1
1616 // CHECK2-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
1617 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
1618 // CHECK2-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
1619 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1620 // CHECK2:       omp.inner.for.cond:
1621 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1622 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1623 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
1624 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1625 // CHECK2:       omp.inner.for.body:
1626 // CHECK2-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1627 // CHECK2-NEXT:    switch i32 [[TMP21]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
1628 // CHECK2-NEXT:    i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
1629 // CHECK2-NEXT:    i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
1630 // CHECK2-NEXT:    ]
1631 // CHECK2:       .omp.sections.case:
1632 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[T_VAR]], align 4
1633 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP10]], i64 0, i64 0
1634 // CHECK2-NEXT:    store i32 [[TMP22]], i32* [[ARRAYIDX]], align 4
1635 // CHECK2-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
1636 // CHECK2:       .omp.sections.case1:
1637 // CHECK2-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP12]], i64 0, i64 0
1638 // CHECK2-NEXT:    [[TMP23:%.*]] = bitcast %struct.S.0* [[ARRAYIDX2]] to i8*
1639 // CHECK2-NEXT:    [[TMP24:%.*]] = bitcast %struct.S.0* [[VAR]] to i8*
1640 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP23]], i8* align 4 [[TMP24]], i64 4, i1 false)
1641 // CHECK2-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
1642 // CHECK2:       .omp.sections.exit:
1643 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1644 // CHECK2:       omp.inner.for.inc:
1645 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1646 // CHECK2-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP25]], 1
1647 // CHECK2-NEXT:    store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
1648 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
1649 // CHECK2:       omp.inner.for.end:
1650 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP14]])
1651 // CHECK2-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
1652 // CHECK2-NEXT:    [[TMP27:%.*]] = bitcast i32* [[T_VAR]] to i8*
1653 // CHECK2-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
1654 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
1655 // CHECK2-NEXT:    [[TMP29:%.*]] = bitcast %struct.S.0* [[VAR]] to i8*
1656 // CHECK2-NEXT:    store i8* [[TMP29]], i8** [[TMP28]], align 8
1657 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
1658 // CHECK2-NEXT:    [[TMP31:%.*]] = bitcast %struct.S.0* [[VAR1]] to i8*
1659 // CHECK2-NEXT:    store i8* [[TMP31]], i8** [[TMP30]], align 8
1660 // CHECK2-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
1661 // CHECK2-NEXT:    [[TMP33:%.*]] = bitcast i32* [[T_VAR1]] to i8*
1662 // CHECK2-NEXT:    store i8* [[TMP33]], i8** [[TMP32]], align 8
1663 // CHECK2-NEXT:    [[TMP34:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1664 // CHECK2-NEXT:    [[TMP35:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP14]], i32 4, i64 32, i8* [[TMP34]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
1665 // CHECK2-NEXT:    switch i32 [[TMP35]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1666 // CHECK2-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1667 // CHECK2-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1668 // CHECK2-NEXT:    ]
1669 // CHECK2:       .omp.reduction.case1:
1670 // CHECK2-NEXT:    [[TMP36:%.*]] = load i32, i32* [[TMP2]], align 4
1671 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[T_VAR]], align 4
1672 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
1673 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[TMP2]], align 4
1674 // CHECK2-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP4]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]])
1675 // CHECK2-NEXT:    [[TMP38:%.*]] = bitcast %struct.S.0* [[TMP4]] to i8*
1676 // CHECK2-NEXT:    [[TMP39:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
1677 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP38]], i8* align 4 [[TMP39]], i64 4, i1 false)
1678 // CHECK2-NEXT:    [[CALL3:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP6]])
1679 // CHECK2-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[CALL3]], 0
1680 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
1681 // CHECK2:       land.rhs:
1682 // CHECK2-NEXT:    [[CALL4:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]])
1683 // CHECK2-NEXT:    [[TOBOOL5:%.*]] = icmp ne i32 [[CALL4]], 0
1684 // CHECK2-NEXT:    br label [[LAND_END]]
1685 // CHECK2:       land.end:
1686 // CHECK2-NEXT:    [[TMP40:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL5]], [[LAND_RHS]] ]
1687 // CHECK2-NEXT:    [[CONV:%.*]] = zext i1 [[TMP40]] to i32
1688 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
1689 // CHECK2-NEXT:    [[TMP41:%.*]] = bitcast %struct.S.0* [[TMP6]] to i8*
1690 // CHECK2-NEXT:    [[TMP42:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
1691 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP41]], i8* align 4 [[TMP42]], i64 4, i1 false)
1692 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
1693 // CHECK2-NEXT:    [[TMP43:%.*]] = load i32, i32* [[TMP8]], align 4
1694 // CHECK2-NEXT:    [[TMP44:%.*]] = load i32, i32* [[T_VAR1]], align 4
1695 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp slt i32 [[TMP43]], [[TMP44]]
1696 // CHECK2-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1697 // CHECK2:       cond.true:
1698 // CHECK2-NEXT:    [[TMP45:%.*]] = load i32, i32* [[TMP8]], align 4
1699 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1700 // CHECK2:       cond.false:
1701 // CHECK2-NEXT:    [[TMP46:%.*]] = load i32, i32* [[T_VAR1]], align 4
1702 // CHECK2-NEXT:    br label [[COND_END]]
1703 // CHECK2:       cond.end:
1704 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP45]], [[COND_TRUE]] ], [ [[TMP46]], [[COND_FALSE]] ]
1705 // CHECK2-NEXT:    store i32 [[COND]], i32* [[TMP8]], align 4
1706 // CHECK2-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1707 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1708 // CHECK2:       .omp.reduction.case2:
1709 // CHECK2-NEXT:    [[TMP47:%.*]] = load i32, i32* [[T_VAR]], align 4
1710 // CHECK2-NEXT:    [[TMP48:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP47]] monotonic, align 4
1711 // CHECK2-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1712 // CHECK2-NEXT:    [[CALL7:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP4]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]])
1713 // CHECK2-NEXT:    [[TMP49:%.*]] = bitcast %struct.S.0* [[TMP4]] to i8*
1714 // CHECK2-NEXT:    [[TMP50:%.*]] = bitcast %struct.S.0* [[CALL7]] to i8*
1715 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP49]], i8* align 4 [[TMP50]], i64 4, i1 false)
1716 // CHECK2-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1717 // CHECK2-NEXT:    call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1718 // CHECK2-NEXT:    [[CALL9:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP6]])
1719 // CHECK2-NEXT:    [[TOBOOL10:%.*]] = icmp ne i32 [[CALL9]], 0
1720 // CHECK2-NEXT:    br i1 [[TOBOOL10]], label [[LAND_RHS11:%.*]], label [[LAND_END14:%.*]]
1721 // CHECK2:       land.rhs11:
1722 // CHECK2-NEXT:    [[CALL12:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]])
1723 // CHECK2-NEXT:    [[TOBOOL13:%.*]] = icmp ne i32 [[CALL12]], 0
1724 // CHECK2-NEXT:    br label [[LAND_END14]]
1725 // CHECK2:       land.end14:
1726 // CHECK2-NEXT:    [[TMP51:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE2]] ], [ [[TOBOOL13]], [[LAND_RHS11]] ]
1727 // CHECK2-NEXT:    [[CONV15:%.*]] = zext i1 [[TMP51]] to i32
1728 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP8]], i32 [[CONV15]])
1729 // CHECK2-NEXT:    [[TMP52:%.*]] = bitcast %struct.S.0* [[TMP6]] to i8*
1730 // CHECK2-NEXT:    [[TMP53:%.*]] = bitcast %struct.S.0* [[REF_TMP8]] to i8*
1731 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP52]], i8* align 4 [[TMP53]], i64 4, i1 false)
1732 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP8]]) #[[ATTR4]]
1733 // CHECK2-NEXT:    call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
1734 // CHECK2-NEXT:    [[TMP54:%.*]] = load i32, i32* [[T_VAR1]], align 4
1735 // CHECK2-NEXT:    [[TMP55:%.*]] = atomicrmw min i32* [[TMP8]], i32 [[TMP54]] monotonic, align 4
1736 // CHECK2-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1737 // CHECK2:       .omp.reduction.default:
1738 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR4]]
1739 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
1740 // CHECK2-NEXT:    ret void
1741 //
1742 //
1743 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
1744 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
1745 // CHECK2-NEXT:  entry:
1746 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1747 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1748 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1749 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1750 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1751 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1752 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
1753 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1754 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
1755 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
1756 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1757 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1758 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
1759 // CHECK2-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1760 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
1761 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
1762 // CHECK2-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
1763 // CHECK2-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S.0*
1764 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
1765 // CHECK2-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
1766 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S.0*
1767 // CHECK2-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
1768 // CHECK2-NEXT:    [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
1769 // CHECK2-NEXT:    [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S.0*
1770 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
1771 // CHECK2-NEXT:    [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
1772 // CHECK2-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S.0*
1773 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
1774 // CHECK2-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
1775 // CHECK2-NEXT:    [[TMP26:%.*]] = bitcast i8* [[TMP25]] to i32*
1776 // CHECK2-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
1777 // CHECK2-NEXT:    [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
1778 // CHECK2-NEXT:    [[TMP29:%.*]] = bitcast i8* [[TMP28]] to i32*
1779 // CHECK2-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP11]], align 4
1780 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32, i32* [[TMP8]], align 4
1781 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
1782 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
1783 // CHECK2-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP14]])
1784 // CHECK2-NEXT:    [[TMP32:%.*]] = bitcast %struct.S.0* [[TMP17]] to i8*
1785 // CHECK2-NEXT:    [[TMP33:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
1786 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
1787 // CHECK2-NEXT:    [[CALL2:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP23]])
1788 // CHECK2-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[CALL2]], 0
1789 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
1790 // CHECK2:       land.rhs:
1791 // CHECK2-NEXT:    [[CALL3:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP20]])
1792 // CHECK2-NEXT:    [[TOBOOL4:%.*]] = icmp ne i32 [[CALL3]], 0
1793 // CHECK2-NEXT:    br label [[LAND_END]]
1794 // CHECK2:       land.end:
1795 // CHECK2-NEXT:    [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
1796 // CHECK2-NEXT:    [[CONV:%.*]] = zext i1 [[TMP34]] to i32
1797 // CHECK2-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
1798 // CHECK2-NEXT:    [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP23]] to i8*
1799 // CHECK2-NEXT:    [[TMP36:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
1800 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
1801 // CHECK2-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR4]]
1802 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP29]], align 4
1803 // CHECK2-NEXT:    [[TMP38:%.*]] = load i32, i32* [[TMP26]], align 4
1804 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP37]], [[TMP38]]
1805 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1806 // CHECK2:       cond.true:
1807 // CHECK2-NEXT:    [[TMP39:%.*]] = load i32, i32* [[TMP29]], align 4
1808 // CHECK2-NEXT:    br label [[COND_END:%.*]]
1809 // CHECK2:       cond.false:
1810 // CHECK2-NEXT:    [[TMP40:%.*]] = load i32, i32* [[TMP26]], align 4
1811 // CHECK2-NEXT:    br label [[COND_END]]
1812 // CHECK2:       cond.end:
1813 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
1814 // CHECK2-NEXT:    store i32 [[COND]], i32* [[TMP29]], align 4
1815 // CHECK2-NEXT:    ret void
1816 //
1817 //
1818 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEanERKS0_
1819 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR6]] align 2 {
1820 // CHECK2-NEXT:  entry:
1821 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1822 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S.0*, align 8
1823 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1824 // CHECK2-NEXT:    store %struct.S.0* [[TMP0]], %struct.S.0** [[DOTADDR]], align 8
1825 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1826 // CHECK2-NEXT:    ret %struct.S.0* [[THIS1]]
1827 //
1828 //
1829 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEcviEv
1830 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR6]] align 2 {
1831 // CHECK2-NEXT:  entry:
1832 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1833 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1834 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1835 // CHECK2-NEXT:    ret i32 0
1836 //
1837 //
1838 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
1839 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1840 // CHECK2-NEXT:  entry:
1841 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1842 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1843 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1844 // CHECK2-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
1845 // CHECK2-NEXT:    ret void
1846 //
1847 //
1848 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
1849 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1850 // CHECK2-NEXT:  entry:
1851 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1852 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1853 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1854 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1855 // CHECK2-NEXT:    [[TMP0:%.*]] = load volatile double, double* @g, align 8
1856 // CHECK2-NEXT:    [[CONV:%.*]] = fptosi double [[TMP0]] to i32
1857 // CHECK2-NEXT:    store i32 [[CONV]], i32* [[F]], align 4
1858 // CHECK2-NEXT:    ret void
1859 //
1860 //
1861 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
1862 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1863 // CHECK2-NEXT:  entry:
1864 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1865 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1866 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1867 // CHECK2-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1868 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1869 // CHECK2-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1870 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1871 // CHECK2-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to double
1872 // CHECK2-NEXT:    [[TMP1:%.*]] = load volatile double, double* @g, align 8
1873 // CHECK2-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], [[TMP1]]
1874 // CHECK2-NEXT:    [[CONV2:%.*]] = fptosi double [[ADD]] to i32
1875 // CHECK2-NEXT:    store i32 [[CONV2]], i32* [[F]], align 4
1876 // CHECK2-NEXT:    ret void
1877 //
1878 //
1879 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
1880 // CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1881 // CHECK2-NEXT:  entry:
1882 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1883 // CHECK2-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1884 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1885 // CHECK2-NEXT:    ret void
1886 //
1887 //
1888 // CHECK3-LABEL: define {{[^@]+}}@main
1889 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
1890 // CHECK3-NEXT:  entry:
1891 // CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1892 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
1893 // CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1894 // CHECK3-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 1 dereferenceable(1) [[REF_TMP]])
1895 // CHECK3-NEXT:    ret i32 0
1896 //
1897 //
1898 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
1899 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
1900 // CHECK3-NEXT:  entry:
1901 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1902 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1903 // CHECK3-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
1904 // CHECK3-NEXT:    [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
1905 // CHECK3-NEXT:    [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
1906 // CHECK3-NEXT:    [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
1907 // CHECK3-NEXT:    [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
1908 // CHECK3-NEXT:    [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
1909 // CHECK3-NEXT:    [[G:%.*]] = alloca double, align 8
1910 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
1911 // CHECK3-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
1912 // CHECK3-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca double, align 8
1913 // CHECK3-NEXT:    [[TMP:%.*]] = alloca double, align 8
1914 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1915 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1916 // CHECK3-NEXT:    store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
1917 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
1918 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
1919 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1920 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
1921 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
1922 // CHECK3-NEXT:    store double 0.000000e+00, double* [[G]], align 8
1923 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1924 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1925 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
1926 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1927 // CHECK3-NEXT:    [[TMP4:%.*]] = icmp slt i32 [[TMP3]], 1
1928 // CHECK3-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i32 [[TMP3]], i32 1
1929 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
1930 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
1931 // CHECK3-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
1932 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1933 // CHECK3:       omp.inner.for.cond:
1934 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1935 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
1936 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
1937 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1938 // CHECK3:       omp.inner.for.body:
1939 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1940 // CHECK3-NEXT:    switch i32 [[TMP9]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
1941 // CHECK3-NEXT:    i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
1942 // CHECK3-NEXT:    i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
1943 // CHECK3-NEXT:    ]
1944 // CHECK3:       .omp.sections.case:
1945 // CHECK3-NEXT:    store double 1.000000e+00, double* [[G]], align 8
1946 // CHECK3-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
1947 // CHECK3:       .omp.sections.case1:
1948 // CHECK3-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
1949 // CHECK3-NEXT:    store double* [[G]], double** [[TMP10]], align 8
1950 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(8) [[REF_TMP]])
1951 // CHECK3-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
1952 // CHECK3:       .omp.sections.exit:
1953 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1954 // CHECK3:       omp.inner.for.inc:
1955 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
1956 // CHECK3-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP11]], 1
1957 // CHECK3-NEXT:    store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
1958 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
1959 // CHECK3:       omp.inner.for.end:
1960 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1961 // CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
1962 // CHECK3-NEXT:    [[TMP13:%.*]] = bitcast double* [[G]] to i8*
1963 // CHECK3-NEXT:    store i8* [[TMP13]], i8** [[TMP12]], align 8
1964 // CHECK3-NEXT:    [[TMP14:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1965 // CHECK3-NEXT:    [[TMP15:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, i8* [[TMP14]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
1966 // CHECK3-NEXT:    switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1967 // CHECK3-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1968 // CHECK3-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1969 // CHECK3-NEXT:    ]
1970 // CHECK3:       .omp.reduction.case1:
1971 // CHECK3-NEXT:    [[TMP16:%.*]] = load double, double* @g, align 8
1972 // CHECK3-NEXT:    [[TMP17:%.*]] = load double, double* [[G]], align 8
1973 // CHECK3-NEXT:    [[ADD:%.*]] = fadd double [[TMP16]], [[TMP17]]
1974 // CHECK3-NEXT:    store double [[ADD]], double* @g, align 8
1975 // CHECK3-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1976 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1977 // CHECK3:       .omp.reduction.case2:
1978 // CHECK3-NEXT:    [[TMP18:%.*]] = load double, double* [[G]], align 8
1979 // CHECK3-NEXT:    [[ATOMIC_LOAD:%.*]] = load atomic i64, i64* bitcast (double* @g to i64*) monotonic, align 8
1980 // CHECK3-NEXT:    br label [[ATOMIC_CONT:%.*]]
1981 // CHECK3:       atomic_cont:
1982 // CHECK3-NEXT:    [[TMP19:%.*]] = phi i64 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP26:%.*]], [[ATOMIC_CONT]] ]
1983 // CHECK3-NEXT:    [[TMP20:%.*]] = bitcast double* [[ATOMIC_TEMP]] to i64*
1984 // CHECK3-NEXT:    [[TMP21:%.*]] = bitcast i64 [[TMP19]] to double
1985 // CHECK3-NEXT:    store double [[TMP21]], double* [[TMP]], align 8
1986 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[TMP]], align 8
1987 // CHECK3-NEXT:    [[TMP23:%.*]] = load double, double* [[G]], align 8
1988 // CHECK3-NEXT:    [[ADD2:%.*]] = fadd double [[TMP22]], [[TMP23]]
1989 // CHECK3-NEXT:    store double [[ADD2]], double* [[ATOMIC_TEMP]], align 8
1990 // CHECK3-NEXT:    [[TMP24:%.*]] = load i64, i64* [[TMP20]], align 8
1991 // CHECK3-NEXT:    [[TMP25:%.*]] = cmpxchg i64* bitcast (double* @g to i64*), i64 [[TMP19]], i64 [[TMP24]] monotonic monotonic, align 8
1992 // CHECK3-NEXT:    [[TMP26]] = extractvalue { i64, i1 } [[TMP25]], 0
1993 // CHECK3-NEXT:    [[TMP27:%.*]] = extractvalue { i64, i1 } [[TMP25]], 1
1994 // CHECK3-NEXT:    br i1 [[TMP27]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
1995 // CHECK3:       atomic_exit:
1996 // CHECK3-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1997 // CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1998 // CHECK3:       .omp.reduction.default:
1999 // CHECK3-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]])
2000 // CHECK3-NEXT:    ret void
2001 //
2002 //
2003 // CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
2004 // CHECK3-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
2005 // CHECK3-NEXT:  entry:
2006 // CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
2007 // CHECK3-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
2008 // CHECK3-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
2009 // CHECK3-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
2010 // CHECK3-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
2011 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2012 // CHECK3-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
2013 // CHECK3-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2014 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
2015 // CHECK3-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
2016 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double*
2017 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
2018 // CHECK3-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
2019 // CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double*
2020 // CHECK3-NEXT:    [[TMP12:%.*]] = load double, double* [[TMP11]], align 8
2021 // CHECK3-NEXT:    [[TMP13:%.*]] = load double, double* [[TMP8]], align 8
2022 // CHECK3-NEXT:    [[ADD:%.*]] = fadd double [[TMP12]], [[TMP13]]
2023 // CHECK3-NEXT:    store double [[ADD]], double* [[TMP11]], align 8
2024 // CHECK3-NEXT:    ret void
2025 //
2026 //
2027 // CHECK4-LABEL: define {{[^@]+}}@main
2028 // CHECK4-SAME: () #[[ATTR1:[0-9]+]] {
2029 // CHECK4-NEXT:  entry:
2030 // CHECK4-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2031 // CHECK4-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2032 // CHECK4-NEXT:    [[TMP0:%.*]] = load i8*, i8** getelementptr inbounds ([[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to %struct.__block_literal_generic*), i32 0, i32 3), align 8
2033 // CHECK4-NEXT:    [[TMP1:%.*]] = bitcast i8* [[TMP0]] to void (i8*)*
2034 // CHECK4-NEXT:    call void [[TMP1]](i8* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to i8*))
2035 // CHECK4-NEXT:    ret i32 0
2036 //
2037 //
2038 // CHECK4-LABEL: define {{[^@]+}}@__main_block_invoke
2039 // CHECK4-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2:[0-9]+]] {
2040 // CHECK4-NEXT:  entry:
2041 // CHECK4-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
2042 // CHECK4-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
2043 // CHECK4-NEXT:    [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
2044 // CHECK4-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
2045 // CHECK4-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
2046 // CHECK4-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
2047 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
2048 // CHECK4-NEXT:    ret void
2049 //
2050 //
2051 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
2052 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] {
2053 // CHECK4-NEXT:  entry:
2054 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2055 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2056 // CHECK4-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
2057 // CHECK4-NEXT:    [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
2058 // CHECK4-NEXT:    [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
2059 // CHECK4-NEXT:    [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
2060 // CHECK4-NEXT:    [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
2061 // CHECK4-NEXT:    [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
2062 // CHECK4-NEXT:    [[G:%.*]] = alloca double, align 8
2063 // CHECK4-NEXT:    [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, align 8
2064 // CHECK4-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
2065 // CHECK4-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca double, align 8
2066 // CHECK4-NEXT:    [[TMP:%.*]] = alloca double, align 8
2067 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2068 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2069 // CHECK4-NEXT:    store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
2070 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
2071 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4
2072 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4
2073 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4
2074 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4
2075 // CHECK4-NEXT:    store double 0.000000e+00, double* [[G]], align 8
2076 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2077 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2078 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
2079 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
2080 // CHECK4-NEXT:    [[TMP4:%.*]] = icmp slt i32 [[TMP3]], 1
2081 // CHECK4-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i32 [[TMP3]], i32 1
2082 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_SECTIONS_UB_]], align 4
2083 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4
2084 // CHECK4-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
2085 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2086 // CHECK4:       omp.inner.for.cond:
2087 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
2088 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4
2089 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
2090 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2091 // CHECK4:       omp.inner.for.body:
2092 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
2093 // CHECK4-NEXT:    switch i32 [[TMP9]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
2094 // CHECK4-NEXT:    i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
2095 // CHECK4-NEXT:    i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
2096 // CHECK4-NEXT:    ]
2097 // CHECK4:       .omp.sections.case:
2098 // CHECK4-NEXT:    store double 1.000000e+00, double* [[G]], align 8
2099 // CHECK4-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
2100 // CHECK4:       .omp.sections.case1:
2101 // CHECK4-NEXT:    [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 0
2102 // CHECK4-NEXT:    store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
2103 // CHECK4-NEXT:    [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 1
2104 // CHECK4-NEXT:    store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
2105 // CHECK4-NEXT:    [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 2
2106 // CHECK4-NEXT:    store i32 0, i32* [[BLOCK_RESERVED]], align 4
2107 // CHECK4-NEXT:    [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 3
2108 // CHECK4-NEXT:    store i8* bitcast (void (i8*)* @_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 8
2109 // CHECK4-NEXT:    [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 4
2110 // CHECK4-NEXT:    store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.1 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
2111 // CHECK4-NEXT:    [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 5
2112 // CHECK4-NEXT:    [[TMP10:%.*]] = load volatile double, double* [[G]], align 8
2113 // CHECK4-NEXT:    store volatile double [[TMP10]], double* [[BLOCK_CAPTURED]], align 8
2114 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]] to void ()*
2115 // CHECK4-NEXT:    [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP11]] to %struct.__block_literal_generic*
2116 // CHECK4-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
2117 // CHECK4-NEXT:    [[TMP13:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
2118 // CHECK4-NEXT:    [[TMP14:%.*]] = load i8*, i8** [[TMP12]], align 8
2119 // CHECK4-NEXT:    [[TMP15:%.*]] = bitcast i8* [[TMP14]] to void (i8*)*
2120 // CHECK4-NEXT:    call void [[TMP15]](i8* [[TMP13]])
2121 // CHECK4-NEXT:    br label [[DOTOMP_SECTIONS_EXIT]]
2122 // CHECK4:       .omp.sections.exit:
2123 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2124 // CHECK4:       omp.inner.for.inc:
2125 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4
2126 // CHECK4-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP16]], 1
2127 // CHECK4-NEXT:    store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4
2128 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
2129 // CHECK4:       omp.inner.for.end:
2130 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2131 // CHECK4-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
2132 // CHECK4-NEXT:    [[TMP18:%.*]] = bitcast double* [[G]] to i8*
2133 // CHECK4-NEXT:    store i8* [[TMP18]], i8** [[TMP17]], align 8
2134 // CHECK4-NEXT:    [[TMP19:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
2135 // CHECK4-NEXT:    [[TMP20:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, i8* [[TMP19]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
2136 // CHECK4-NEXT:    switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
2137 // CHECK4-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
2138 // CHECK4-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
2139 // CHECK4-NEXT:    ]
2140 // CHECK4:       .omp.reduction.case1:
2141 // CHECK4-NEXT:    [[TMP21:%.*]] = load double, double* @g, align 8
2142 // CHECK4-NEXT:    [[TMP22:%.*]] = load double, double* [[G]], align 8
2143 // CHECK4-NEXT:    [[ADD:%.*]] = fadd double [[TMP21]], [[TMP22]]
2144 // CHECK4-NEXT:    store double [[ADD]], double* @g, align 8
2145 // CHECK4-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2146 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2147 // CHECK4:       .omp.reduction.case2:
2148 // CHECK4-NEXT:    [[TMP23:%.*]] = load double, double* [[G]], align 8
2149 // CHECK4-NEXT:    [[ATOMIC_LOAD:%.*]] = load atomic i64, i64* bitcast (double* @g to i64*) monotonic, align 8
2150 // CHECK4-NEXT:    br label [[ATOMIC_CONT:%.*]]
2151 // CHECK4:       atomic_cont:
2152 // CHECK4-NEXT:    [[TMP24:%.*]] = phi i64 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP31:%.*]], [[ATOMIC_CONT]] ]
2153 // CHECK4-NEXT:    [[TMP25:%.*]] = bitcast double* [[ATOMIC_TEMP]] to i64*
2154 // CHECK4-NEXT:    [[TMP26:%.*]] = bitcast i64 [[TMP24]] to double
2155 // CHECK4-NEXT:    store double [[TMP26]], double* [[TMP]], align 8
2156 // CHECK4-NEXT:    [[TMP27:%.*]] = load double, double* [[TMP]], align 8
2157 // CHECK4-NEXT:    [[TMP28:%.*]] = load double, double* [[G]], align 8
2158 // CHECK4-NEXT:    [[ADD2:%.*]] = fadd double [[TMP27]], [[TMP28]]
2159 // CHECK4-NEXT:    store double [[ADD2]], double* [[ATOMIC_TEMP]], align 8
2160 // CHECK4-NEXT:    [[TMP29:%.*]] = load i64, i64* [[TMP25]], align 8
2161 // CHECK4-NEXT:    [[TMP30:%.*]] = cmpxchg i64* bitcast (double* @g to i64*), i64 [[TMP24]], i64 [[TMP29]] monotonic monotonic, align 8
2162 // CHECK4-NEXT:    [[TMP31]] = extractvalue { i64, i1 } [[TMP30]], 0
2163 // CHECK4-NEXT:    [[TMP32:%.*]] = extractvalue { i64, i1 } [[TMP30]], 1
2164 // CHECK4-NEXT:    br i1 [[TMP32]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
2165 // CHECK4:       atomic_exit:
2166 // CHECK4-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
2167 // CHECK4-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
2168 // CHECK4:       .omp.reduction.default:
2169 // CHECK4-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]])
2170 // CHECK4-NEXT:    ret void
2171 //
2172 //
2173 // CHECK4-LABEL: define {{[^@]+}}@_block_invoke
2174 // CHECK4-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2]] {
2175 // CHECK4-NEXT:  entry:
2176 // CHECK4-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
2177 // CHECK4-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>*, align 8
2178 // CHECK4-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
2179 // CHECK4-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>*
2180 // CHECK4-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>** [[BLOCK_ADDR]], align 8
2181 // CHECK4-NEXT:    [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double }>* [[BLOCK]], i32 0, i32 5
2182 // CHECK4-NEXT:    store double 2.000000e+00, double* [[BLOCK_CAPTURE_ADDR]], align 8
2183 // CHECK4-NEXT:    ret void
2184 //
2185 //
2186 // CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
2187 // CHECK4-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
2188 // CHECK4-NEXT:  entry:
2189 // CHECK4-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
2190 // CHECK4-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
2191 // CHECK4-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
2192 // CHECK4-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
2193 // CHECK4-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
2194 // CHECK4-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
2195 // CHECK4-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
2196 // CHECK4-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
2197 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
2198 // CHECK4-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
2199 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double*
2200 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
2201 // CHECK4-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
2202 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double*
2203 // CHECK4-NEXT:    [[TMP12:%.*]] = load double, double* [[TMP11]], align 8
2204 // CHECK4-NEXT:    [[TMP13:%.*]] = load double, double* [[TMP8]], align 8
2205 // CHECK4-NEXT:    [[ADD:%.*]] = fadd double [[TMP12]], [[TMP13]]
2206 // CHECK4-NEXT:    store double [[ADD]], double* [[TMP11]], align 8
2207 // CHECK4-NEXT:    ret void
2208 //
2209