1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // RUN: %clang_cc1 -no-opaque-pointers -DCHECK -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK1
3 // RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
4 // RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK1
5 // RUN: %clang_cc1 -no-opaque-pointers -DCHECK -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK3
6 // RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
7 // RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK3
8
9 // RUN: %clang_cc1 -no-opaque-pointers -DCHECK -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
10 // RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
11 // RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
12 // RUN: %clang_cc1 -no-opaque-pointers -DCHECK -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
13 // RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
14 // RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
15
16 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK9
17 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
18 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK9
19
20 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
21 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
22 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
23
24 // expected-no-diagnostics
25 #ifndef HEADER
26 #define HEADER
27
struct St { // helper type: default argument for S's copy ctor below; forces non-trivial copy codegen
  int a, b;
  St() : a(0), b(0) {} // zero-initialize both fields
  St(const St &st) : a(st.a + st.b), b(0) {} // "copy" folds a+b into a and zeroes b (intentional, exercises ctor IR)
  ~St() {}
};
34
volatile int g = 1212; // volatile global listed in private() in the LAMBDA variant of main
volatile int &g1 = g;  // reference alias to g, also privatized — checks reference privatization
37
template <class T>
struct S { // wrapper with non-trivial ctors/dtor so private() emits ctor/dtor calls in the outlined region
  T f;
  S(T a) : f(a + g) {} // reads the volatile global g (see _ZN1SIfEC2Ef CHECK body)
  S() : f(g) {}
  S(const S &s, St t = St()) : f(s.f + t.a) {} // copy ctor with defaulted St parameter
  operator T() { return T(); }
  ~S() {}
};
47
48
template <typename T>
T tmain() { // templated driver: privatizes scalar, array, class array, and reference locals
  S<T> test;
  T t_var = T();
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> &var = test; // reference in private() — outlined fn materializes a fresh S instead
#pragma omp target
#pragma omp teams distribute private(t_var, vec, s_arr, var)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  return T();
}
64
S<float> test; // global instances drive the __cxx_global_var_init CHECK bodies
int t_var = 333;
int vec[] = {1, 2};
S<float> s_arr[] = {1, 2}; // element-wise construction checked in __cxx_global_var_init.1
S<float> var(3);
70
int main() { // non-template target region; then instantiates the templated one
  static int sivar; // function-local static, privatized below
#ifdef LAMBDA
  [&]() { // LAMBDA build: privatization of captured globals/statics inside a lambda
#pragma omp target
#pragma omp teams distribute private(g, g1, sivar)
  for (int i = 0; i < 2; ++i) {

    // Skip global, bound tid and loop vars
    g = 1; // stores target the private copies, not the globals
    g1 = 1;
    sivar = 2;
    [&]() { // nested lambda: its captures also refer to the private copies
      g = 2;
      g1 = 2;
      sivar = 4;

    }();
  }
  }();
  return 0;
#else
#pragma omp target
#pragma omp teams distribute private(t_var, vec, s_arr, var, sivar)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var; // private t_var starts uninitialized (codegen-only test; never executed)
    s_arr[i] = var; // S<float> copy lowered to llvm.memcpy in the outlined body
    sivar += i;
  }
  return tmain<int>(); // line-sensitive: region symbols encode _l56/_l93
#endif
}
103
104
105
106 // Skip global, bound tid and loop vars
107
108 // private(s_arr)
109
110 // private(var)
111
112
113
114
115
116 // Skip global, bound tid and loop vars
117
118 // private(s_arr)
119
120
121 // private(var)
122
123
124 #endif
125 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init
126 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
127 // CHECK1-NEXT: entry:
128 // CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) @test)
129 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
130 // CHECK1-NEXT: ret void
131 //
132 //
133 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
134 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
135 // CHECK1-NEXT: entry:
136 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
137 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
138 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
139 // CHECK1-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
140 // CHECK1-NEXT: ret void
141 //
142 //
143 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
144 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
145 // CHECK1-NEXT: entry:
146 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
147 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
148 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
149 // CHECK1-NEXT: call void @_ZN1SIfED2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
150 // CHECK1-NEXT: ret void
151 //
152 //
153 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
154 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
155 // CHECK1-NEXT: entry:
156 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
157 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
158 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
159 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
160 // CHECK1-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
161 // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
162 // CHECK1-NEXT: store float [[CONV]], float* [[F]], align 4
163 // CHECK1-NEXT: ret void
164 //
165 //
166 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
167 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
168 // CHECK1-NEXT: entry:
169 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
170 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
171 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
172 // CHECK1-NEXT: ret void
173 //
174 //
175 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
176 // CHECK1-SAME: () #[[ATTR0]] {
177 // CHECK1-NEXT: entry:
178 // CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float noundef 1.000000e+00)
179 // CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float noundef 2.000000e+00)
180 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
181 // CHECK1-NEXT: ret void
182 //
183 //
184 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
185 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
186 // CHECK1-NEXT: entry:
187 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
188 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
189 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
190 // CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4
191 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
192 // CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
193 // CHECK1-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]])
194 // CHECK1-NEXT: ret void
195 //
196 //
197 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
198 // CHECK1-SAME: (i8* noundef [[TMP0:%.*]]) #[[ATTR0]] {
199 // CHECK1-NEXT: entry:
200 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
201 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
202 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
203 // CHECK1: arraydestroy.body:
204 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 1, i64 0), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
205 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
206 // CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
207 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
208 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
209 // CHECK1: arraydestroy.done1:
210 // CHECK1-NEXT: ret void
211 //
212 //
213 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
214 // CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
215 // CHECK1-NEXT: entry:
216 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
217 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
218 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
219 // CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4
220 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
221 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
222 // CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
223 // CHECK1-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
224 // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
225 // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
226 // CHECK1-NEXT: store float [[ADD]], float* [[F]], align 4
227 // CHECK1-NEXT: ret void
228 //
229 //
230 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
231 // CHECK1-SAME: () #[[ATTR0]] {
232 // CHECK1-NEXT: entry:
233 // CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) @var, float noundef 3.000000e+00)
234 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
235 // CHECK1-NEXT: ret void
236 //
237 //
238 // CHECK1-LABEL: define {{[^@]+}}@main
239 // CHECK1-SAME: () #[[ATTR3:[0-9]+]] {
240 // CHECK1-NEXT: entry:
241 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
242 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
243 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
244 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
245 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
246 // CHECK1-NEXT: store i32 1, i32* [[TMP0]], align 4
247 // CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
248 // CHECK1-NEXT: store i32 0, i32* [[TMP1]], align 4
249 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
250 // CHECK1-NEXT: store i8** null, i8*** [[TMP2]], align 8
251 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
252 // CHECK1-NEXT: store i8** null, i8*** [[TMP3]], align 8
253 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
254 // CHECK1-NEXT: store i64* null, i64** [[TMP4]], align 8
255 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
256 // CHECK1-NEXT: store i64* null, i64** [[TMP5]], align 8
257 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
258 // CHECK1-NEXT: store i8** null, i8*** [[TMP6]], align 8
259 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
260 // CHECK1-NEXT: store i8** null, i8*** [[TMP7]], align 8
261 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
262 // CHECK1-NEXT: store i64 2, i64* [[TMP8]], align 8
263 // CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l93.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
264 // CHECK1-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
265 // CHECK1-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
266 // CHECK1: omp_offload.failed:
267 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l93() #[[ATTR2]]
268 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
269 // CHECK1: omp_offload.cont:
270 // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v()
271 // CHECK1-NEXT: ret i32 [[CALL]]
272 //
273 //
274 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l93
275 // CHECK1-SAME: () #[[ATTR4:[0-9]+]] {
276 // CHECK1-NEXT: entry:
277 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
278 // CHECK1-NEXT: ret void
279 //
280 //
281 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
282 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR4]] {
283 // CHECK1-NEXT: entry:
284 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
285 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
286 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
287 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
288 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
289 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
290 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
291 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
292 // CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
293 // CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
294 // CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
295 // CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S:%.*]], align 4
296 // CHECK1-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
297 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
298 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
299 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
300 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
301 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
302 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
303 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
304 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
305 // CHECK1-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
306 // CHECK1-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
307 // CHECK1: arrayctor.loop:
308 // CHECK1-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
309 // CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
310 // CHECK1-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
311 // CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
312 // CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
313 // CHECK1: arrayctor.cont:
314 // CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]])
315 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
316 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
317 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
318 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
319 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
320 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
321 // CHECK1: cond.true:
322 // CHECK1-NEXT: br label [[COND_END:%.*]]
323 // CHECK1: cond.false:
324 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
325 // CHECK1-NEXT: br label [[COND_END]]
326 // CHECK1: cond.end:
327 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
328 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
329 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
330 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
331 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
332 // CHECK1: omp.inner.for.cond:
333 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
334 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
335 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
336 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
337 // CHECK1: omp.inner.for.cond.cleanup:
338 // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
339 // CHECK1: omp.inner.for.body:
340 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
341 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
342 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
343 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
344 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
345 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4
346 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
347 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]]
348 // CHECK1-NEXT: store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
349 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4
350 // CHECK1-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP10]] to i64
351 // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 [[IDXPROM2]]
352 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast %struct.S* [[ARRAYIDX3]] to i8*
353 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[VAR]] to i8*
354 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP11]], i8* align 4 [[TMP12]], i64 4, i1 false)
355 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
356 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[SIVAR]], align 4
357 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]]
358 // CHECK1-NEXT: store i32 [[ADD4]], i32* [[SIVAR]], align 4
359 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
360 // CHECK1: omp.body.continue:
361 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
362 // CHECK1: omp.inner.for.inc:
363 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
364 // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], 1
365 // CHECK1-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
366 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
367 // CHECK1: omp.inner.for.end:
368 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
369 // CHECK1: omp.loop.exit:
370 // CHECK1-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
371 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
372 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP17]])
373 // CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
374 // CHECK1-NEXT: [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
375 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN6]], i64 2
376 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
377 // CHECK1: arraydestroy.body:
378 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP18]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
379 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
380 // CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
381 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN6]]
382 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE7:%.*]], label [[ARRAYDESTROY_BODY]]
383 // CHECK1: arraydestroy.done7:
384 // CHECK1-NEXT: ret void
385 //
386 //
387 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
388 // CHECK1-SAME: () #[[ATTR6:[0-9]+]] comdat {
389 // CHECK1-NEXT: entry:
390 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
391 // CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
392 // CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
393 // CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
394 // CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
395 // CHECK1-NEXT: [[VAR:%.*]] = alloca %struct.S.0*, align 8
396 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
397 // CHECK1-NEXT: [[_TMP1:%.*]] = alloca %struct.S.0*, align 8
398 // CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]])
399 // CHECK1-NEXT: store i32 0, i32* [[T_VAR]], align 4
400 // CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
401 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
402 // CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
403 // CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef signext 1)
404 // CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
405 // CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef signext 2)
406 // CHECK1-NEXT: store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 8
407 // CHECK1-NEXT: store %struct.S.0* undef, %struct.S.0** [[_TMP1]], align 8
408 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
409 // CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
410 // CHECK1-NEXT: store i32 1, i32* [[TMP1]], align 4
411 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
412 // CHECK1-NEXT: store i32 0, i32* [[TMP2]], align 4
413 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
414 // CHECK1-NEXT: store i8** null, i8*** [[TMP3]], align 8
415 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
416 // CHECK1-NEXT: store i8** null, i8*** [[TMP4]], align 8
417 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
418 // CHECK1-NEXT: store i64* null, i64** [[TMP5]], align 8
419 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
420 // CHECK1-NEXT: store i64* null, i64** [[TMP6]], align 8
421 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
422 // CHECK1-NEXT: store i8** null, i8*** [[TMP7]], align 8
423 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
424 // CHECK1-NEXT: store i8** null, i8*** [[TMP8]], align 8
425 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
426 // CHECK1-NEXT: store i64 2, i64* [[TMP9]], align 8
427 // CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
428 // CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
429 // CHECK1-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
430 // CHECK1: omp_offload.failed:
431 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56() #[[ATTR2]]
432 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
433 // CHECK1: omp_offload.cont:
434 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
435 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
436 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
437 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
438 // CHECK1: arraydestroy.body:
439 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP12]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
440 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
441 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
442 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
443 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
444 // CHECK1: arraydestroy.done2:
445 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR2]]
446 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[RETVAL]], align 4
447 // CHECK1-NEXT: ret i32 [[TMP13]]
448 //
449 //
450 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
451 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
452 // CHECK1-NEXT: entry:
453 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
454 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
455 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
456 // CHECK1-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
457 // CHECK1-NEXT: ret void
458 //
459 //
460 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
461 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef signext [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
462 // CHECK1-NEXT: entry:
463 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
464 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
465 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
466 // CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
467 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
468 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
469 // CHECK1-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef signext [[TMP0]])
470 // CHECK1-NEXT: ret void
471 //
472 //
473 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56
474 // CHECK1-SAME: () #[[ATTR4]] {
475 // CHECK1-NEXT: entry:
476 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*))
477 // CHECK1-NEXT: ret void
478 //
479 //
480 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
481 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR4]] {
482 // CHECK1-NEXT: entry:
483 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
484 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
485 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
486 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
487 // CHECK1-NEXT: [[_TMP1:%.*]] = alloca %struct.S.0*, align 8
488 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
489 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
490 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
491 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
492 // CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
493 // CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
494 // CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
495 // CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
496 // CHECK1-NEXT: [[_TMP2:%.*]] = alloca %struct.S.0*, align 8
497 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
498 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
499 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
500 // CHECK1-NEXT: store %struct.S.0* undef, %struct.S.0** [[_TMP1]], align 8
501 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
502 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
503 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
504 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
505 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
506 // CHECK1-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
507 // CHECK1-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
508 // CHECK1: arrayctor.loop:
509 // CHECK1-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
510 // CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
511 // CHECK1-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i64 1
512 // CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
513 // CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
514 // CHECK1: arrayctor.cont:
515 // CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]])
516 // CHECK1-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[_TMP2]], align 8
517 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
518 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
519 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
520 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
521 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
522 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
523 // CHECK1: cond.true:
524 // CHECK1-NEXT: br label [[COND_END:%.*]]
525 // CHECK1: cond.false:
526 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
527 // CHECK1-NEXT: br label [[COND_END]]
528 // CHECK1: cond.end:
529 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
530 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
531 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
532 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
533 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
534 // CHECK1: omp.inner.for.cond:
535 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
536 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
537 // CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
538 // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
539 // CHECK1: omp.inner.for.cond.cleanup:
540 // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
541 // CHECK1: omp.inner.for.body:
542 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
543 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
544 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
545 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
546 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
547 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4
548 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
549 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]]
550 // CHECK1-NEXT: store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
551 // CHECK1-NEXT: [[TMP10:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP2]], align 8
552 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
553 // CHECK1-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP11]] to i64
554 // CHECK1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 [[IDXPROM4]]
555 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast %struct.S.0* [[ARRAYIDX5]] to i8*
556 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast %struct.S.0* [[TMP10]] to i8*
557 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP12]], i8* align 4 [[TMP13]], i64 4, i1 false)
558 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
559 // CHECK1: omp.body.continue:
560 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
561 // CHECK1: omp.inner.for.inc:
562 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
563 // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP14]], 1
564 // CHECK1-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
565 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
566 // CHECK1: omp.inner.for.end:
567 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
568 // CHECK1: omp.loop.exit:
569 // CHECK1-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
570 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
571 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP16]])
572 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
573 // CHECK1-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
574 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN7]], i64 2
575 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
576 // CHECK1: arraydestroy.body:
577 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP17]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
578 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
579 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
580 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN7]]
581 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE8:%.*]], label [[ARRAYDESTROY_BODY]]
582 // CHECK1: arraydestroy.done8:
583 // CHECK1-NEXT: ret void
584 //
585 //
586 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
587 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
588 // CHECK1-NEXT: entry:
589 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
590 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
591 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
592 // CHECK1-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
593 // CHECK1-NEXT: ret void
594 //
595 //
596 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
597 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
598 // CHECK1-NEXT: entry:
599 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
600 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
601 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
602 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
603 // CHECK1-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
604 // CHECK1-NEXT: store i32 [[TMP0]], i32* [[F]], align 4
605 // CHECK1-NEXT: ret void
606 //
607 //
608 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
609 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef signext [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
610 // CHECK1-NEXT: entry:
611 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
612 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
613 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
614 // CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
615 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
616 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
617 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
618 // CHECK1-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
619 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]]
620 // CHECK1-NEXT: store i32 [[ADD]], i32* [[F]], align 4
621 // CHECK1-NEXT: ret void
622 //
623 //
624 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
625 // CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
626 // CHECK1-NEXT: entry:
627 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
628 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
629 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
630 // CHECK1-NEXT: ret void
631 //
632 //
633 // CHECK1-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_teams_distribute_private_codegen.cpp
634 // CHECK1-SAME: () #[[ATTR0]] {
635 // CHECK1-NEXT: entry:
636 // CHECK1-NEXT: call void @__cxx_global_var_init()
637 // CHECK1-NEXT: call void @__cxx_global_var_init.1()
638 // CHECK1-NEXT: call void @__cxx_global_var_init.2()
639 // CHECK1-NEXT: ret void
640 //
641 //
642 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
643 // CHECK1-SAME: () #[[ATTR0]] {
644 // CHECK1-NEXT: entry:
645 // CHECK1-NEXT: call void @__tgt_register_requires(i64 1)
646 // CHECK1-NEXT: ret void
647 //
648 //
649 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init
650 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
651 // CHECK3-NEXT: entry:
652 // CHECK3-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) @test)
653 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
654 // CHECK3-NEXT: ret void
655 //
656 //
657 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
658 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
659 // CHECK3-NEXT: entry:
660 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
661 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
662 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
663 // CHECK3-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
664 // CHECK3-NEXT: ret void
665 //
666 //
667 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
668 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
669 // CHECK3-NEXT: entry:
670 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
671 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
672 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
673 // CHECK3-NEXT: call void @_ZN1SIfED2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
674 // CHECK3-NEXT: ret void
675 //
676 //
677 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
678 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
679 // CHECK3-NEXT: entry:
680 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
681 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
682 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
683 // CHECK3-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
684 // CHECK3-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
685 // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
686 // CHECK3-NEXT: store float [[CONV]], float* [[F]], align 4
687 // CHECK3-NEXT: ret void
688 //
689 //
690 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
691 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
692 // CHECK3-NEXT: entry:
693 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
694 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
695 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
696 // CHECK3-NEXT: ret void
697 //
698 //
699 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
700 // CHECK3-SAME: () #[[ATTR0]] {
701 // CHECK3-NEXT: entry:
702 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), float noundef 1.000000e+00)
703 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 1), float noundef 2.000000e+00)
704 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
705 // CHECK3-NEXT: ret void
706 //
707 //
708 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
709 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
710 // CHECK3-NEXT: entry:
711 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
712 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
713 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
714 // CHECK3-NEXT: store float [[A]], float* [[A_ADDR]], align 4
715 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
716 // CHECK3-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
717 // CHECK3-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]])
718 // CHECK3-NEXT: ret void
719 //
720 //
721 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
722 // CHECK3-SAME: (i8* noundef [[TMP0:%.*]]) #[[ATTR0]] {
723 // CHECK3-NEXT: entry:
724 // CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
725 // CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
726 // CHECK3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
727 // CHECK3: arraydestroy.body:
728 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 1, i32 0), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
729 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
730 // CHECK3-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
731 // CHECK3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
732 // CHECK3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
733 // CHECK3: arraydestroy.done1:
734 // CHECK3-NEXT: ret void
735 //
736 //
737 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
738 // CHECK3-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
739 // CHECK3-NEXT: entry:
740 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 4
741 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
742 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 4
743 // CHECK3-NEXT: store float [[A]], float* [[A_ADDR]], align 4
744 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 4
745 // CHECK3-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
746 // CHECK3-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
747 // CHECK3-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
748 // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
749 // CHECK3-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
750 // CHECK3-NEXT: store float [[ADD]], float* [[F]], align 4
751 // CHECK3-NEXT: ret void
752 //
753 //
754 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
755 // CHECK3-SAME: () #[[ATTR0]] {
756 // CHECK3-NEXT: entry:
757 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) @var, float noundef 3.000000e+00)
758 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
759 // CHECK3-NEXT: ret void
760 //
761 //
762 // CHECK3-LABEL: define {{[^@]+}}@main
763 // CHECK3-SAME: () #[[ATTR3:[0-9]+]] {
764 // CHECK3-NEXT: entry:
765 // CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
766 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
767 // CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4
768 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
769 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
770 // CHECK3-NEXT: store i32 1, i32* [[TMP0]], align 4
771 // CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
772 // CHECK3-NEXT: store i32 0, i32* [[TMP1]], align 4
773 // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
774 // CHECK3-NEXT: store i8** null, i8*** [[TMP2]], align 4
775 // CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
776 // CHECK3-NEXT: store i8** null, i8*** [[TMP3]], align 4
777 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
778 // CHECK3-NEXT: store i64* null, i64** [[TMP4]], align 4
779 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
780 // CHECK3-NEXT: store i64* null, i64** [[TMP5]], align 4
781 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
782 // CHECK3-NEXT: store i8** null, i8*** [[TMP6]], align 4
783 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
784 // CHECK3-NEXT: store i8** null, i8*** [[TMP7]], align 4
785 // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
786 // CHECK3-NEXT: store i64 2, i64* [[TMP8]], align 8
787 // CHECK3-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l93.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
788 // CHECK3-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
789 // CHECK3-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
790 // CHECK3: omp_offload.failed:
791 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l93() #[[ATTR2]]
792 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
793 // CHECK3: omp_offload.cont:
794 // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
795 // CHECK3-NEXT: ret i32 [[CALL]]
796 //
797 //
798 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l93
799 // CHECK3-SAME: () #[[ATTR4:[0-9]+]] {
800 // CHECK3-NEXT: entry:
801 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
802 // CHECK3-NEXT: ret void
803 //
804 //
805 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
806 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR4]] {
807 // CHECK3-NEXT: entry:
808 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
809 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
810 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
811 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
812 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
813 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
814 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
815 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
816 // CHECK3-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
817 // CHECK3-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
818 // CHECK3-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
819 // CHECK3-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S:%.*]], align 4
820 // CHECK3-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
821 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
822 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
823 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
824 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
825 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
826 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
827 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
828 // CHECK3-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
829 // CHECK3-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i32 2
830 // CHECK3-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
831 // CHECK3: arrayctor.loop:
832 // CHECK3-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
833 // CHECK3-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
834 // CHECK3-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i32 1
835 // CHECK3-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
836 // CHECK3-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
837 // CHECK3: arrayctor.cont:
838 // CHECK3-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]])
839 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
840 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
841 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
842 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
843 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
844 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
845 // CHECK3: cond.true:
846 // CHECK3-NEXT: br label [[COND_END:%.*]]
847 // CHECK3: cond.false:
848 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
849 // CHECK3-NEXT: br label [[COND_END]]
850 // CHECK3: cond.end:
851 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
852 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
853 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
854 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
855 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
856 // CHECK3: omp.inner.for.cond:
857 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
858 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
859 // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
860 // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
861 // CHECK3: omp.inner.for.cond.cleanup:
862 // CHECK3-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
863 // CHECK3: omp.inner.for.body:
864 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
865 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
866 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
867 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4
868 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
869 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4
870 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i32 0, i32 [[TMP9]]
871 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
872 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4
873 // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 [[TMP10]]
874 // CHECK3-NEXT: [[TMP11:%.*]] = bitcast %struct.S* [[ARRAYIDX2]] to i8*
875 // CHECK3-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[VAR]] to i8*
876 // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP11]], i8* align 4 [[TMP12]], i32 4, i1 false)
877 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
878 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[SIVAR]], align 4
879 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP14]], [[TMP13]]
880 // CHECK3-NEXT: store i32 [[ADD3]], i32* [[SIVAR]], align 4
881 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
882 // CHECK3: omp.body.continue:
883 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
884 // CHECK3: omp.inner.for.inc:
885 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
886 // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP15]], 1
887 // CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
888 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
889 // CHECK3: omp.inner.for.end:
890 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
891 // CHECK3: omp.loop.exit:
892 // CHECK3-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
893 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
894 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP17]])
895 // CHECK3-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
896 // CHECK3-NEXT: [[ARRAY_BEGIN5:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
897 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN5]], i32 2
898 // CHECK3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
899 // CHECK3: arraydestroy.body:
900 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP18]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
901 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
902 // CHECK3-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
903 // CHECK3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN5]]
904 // CHECK3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE6:%.*]], label [[ARRAYDESTROY_BODY]]
905 // CHECK3: arraydestroy.done6:
906 // CHECK3-NEXT: ret void
907 //
908 //
909 // CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
910 // CHECK3-SAME: () #[[ATTR6:[0-9]+]] comdat {
911 // CHECK3-NEXT: entry:
912 // CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
913 // CHECK3-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
914 // CHECK3-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
915 // CHECK3-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
916 // CHECK3-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
917 // CHECK3-NEXT: [[VAR:%.*]] = alloca %struct.S.0*, align 4
918 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
919 // CHECK3-NEXT: [[_TMP1:%.*]] = alloca %struct.S.0*, align 4
920 // CHECK3-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]])
921 // CHECK3-NEXT: store i32 0, i32* [[T_VAR]], align 4
922 // CHECK3-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
923 // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
924 // CHECK3-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
925 // CHECK3-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef 1)
926 // CHECK3-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i32 1
927 // CHECK3-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
928 // CHECK3-NEXT: store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 4
929 // CHECK3-NEXT: store %struct.S.0* undef, %struct.S.0** [[_TMP1]], align 4
930 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
931 // CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
932 // CHECK3-NEXT: store i32 1, i32* [[TMP1]], align 4
933 // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
934 // CHECK3-NEXT: store i32 0, i32* [[TMP2]], align 4
935 // CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
936 // CHECK3-NEXT: store i8** null, i8*** [[TMP3]], align 4
937 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
938 // CHECK3-NEXT: store i8** null, i8*** [[TMP4]], align 4
939 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
940 // CHECK3-NEXT: store i64* null, i64** [[TMP5]], align 4
941 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
942 // CHECK3-NEXT: store i64* null, i64** [[TMP6]], align 4
943 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
944 // CHECK3-NEXT: store i8** null, i8*** [[TMP7]], align 4
945 // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
946 // CHECK3-NEXT: store i8** null, i8*** [[TMP8]], align 4
947 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
948 // CHECK3-NEXT: store i64 2, i64* [[TMP9]], align 8
949 // CHECK3-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
950 // CHECK3-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
951 // CHECK3-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
952 // CHECK3: omp_offload.failed:
953 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56() #[[ATTR2]]
954 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
955 // CHECK3: omp_offload.cont:
956 // CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4
957 // CHECK3-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
958 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i32 2
959 // CHECK3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
960 // CHECK3: arraydestroy.body:
961 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP12]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
962 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
963 // CHECK3-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
964 // CHECK3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
965 // CHECK3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
966 // CHECK3: arraydestroy.done2:
967 // CHECK3-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR2]]
968 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[RETVAL]], align 4
969 // CHECK3-NEXT: ret i32 [[TMP13]]
970 //
971 //
972 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
973 // CHECK3-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
974 // CHECK3-NEXT: entry:
975 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
976 // CHECK3-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
977 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
978 // CHECK3-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
979 // CHECK3-NEXT: ret void
980 //
981 //
982 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
983 // CHECK3-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
984 // CHECK3-NEXT: entry:
985 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
986 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
987 // CHECK3-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
988 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
989 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
990 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
991 // CHECK3-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]])
992 // CHECK3-NEXT: ret void
993 //
994 //
995 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l56
996 // CHECK3-SAME: () #[[ATTR4]] {
997 // CHECK3-NEXT: entry:
998 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*))
999 // CHECK3-NEXT: ret void
1000 //
1001 //
1002 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
1003 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR4]] {
1004 // CHECK3-NEXT: entry:
1005 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1006 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1007 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1008 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
1009 // CHECK3-NEXT: [[_TMP1:%.*]] = alloca %struct.S.0*, align 4
1010 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1011 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1012 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1013 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1014 // CHECK3-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
1015 // CHECK3-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
1016 // CHECK3-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
1017 // CHECK3-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1018 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca %struct.S.0*, align 4
1019 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
1020 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1021 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1022 // CHECK3-NEXT: store %struct.S.0* undef, %struct.S.0** [[_TMP1]], align 4
1023 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
1024 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
1025 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1026 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1027 // CHECK3-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1028 // CHECK3-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i32 2
1029 // CHECK3-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
1030 // CHECK3: arrayctor.loop:
1031 // CHECK3-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
1032 // CHECK3-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
1033 // CHECK3-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYCTOR_CUR]], i32 1
1034 // CHECK3-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
1035 // CHECK3-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
1036 // CHECK3: arrayctor.cont:
1037 // CHECK3-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]])
1038 // CHECK3-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[_TMP2]], align 4
1039 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1040 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1041 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1042 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1043 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
1044 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1045 // CHECK3: cond.true:
1046 // CHECK3-NEXT: br label [[COND_END:%.*]]
1047 // CHECK3: cond.false:
1048 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1049 // CHECK3-NEXT: br label [[COND_END]]
1050 // CHECK3: cond.end:
1051 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
1052 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1053 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1054 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
1055 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1056 // CHECK3: omp.inner.for.cond:
1057 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1058 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1059 // CHECK3-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
1060 // CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
1061 // CHECK3: omp.inner.for.cond.cleanup:
1062 // CHECK3-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
1063 // CHECK3: omp.inner.for.body:
1064 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1065 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
1066 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1067 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4
1068 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
1069 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4
1070 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i32 0, i32 [[TMP9]]
1071 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[ARRAYIDX]], align 4
1072 // CHECK3-NEXT: [[TMP10:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP2]], align 4
1073 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
1074 // CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 [[TMP11]]
1075 // CHECK3-NEXT: [[TMP12:%.*]] = bitcast %struct.S.0* [[ARRAYIDX4]] to i8*
1076 // CHECK3-NEXT: [[TMP13:%.*]] = bitcast %struct.S.0* [[TMP10]] to i8*
1077 // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP12]], i8* align 4 [[TMP13]], i32 4, i1 false)
1078 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1079 // CHECK3: omp.body.continue:
1080 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1081 // CHECK3: omp.inner.for.inc:
1082 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1083 // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], 1
1084 // CHECK3-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4
1085 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
1086 // CHECK3: omp.inner.for.end:
1087 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1088 // CHECK3: omp.loop.exit:
1089 // CHECK3-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1090 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
1091 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP16]])
1092 // CHECK3-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR2]]
1093 // CHECK3-NEXT: [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1094 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN6]], i32 2
1095 // CHECK3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
1096 // CHECK3: arraydestroy.body:
1097 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP17]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1098 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i32 -1
1099 // CHECK3-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
1100 // CHECK3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN6]]
1101 // CHECK3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE7:%.*]], label [[ARRAYDESTROY_BODY]]
1102 // CHECK3: arraydestroy.done7:
1103 // CHECK3-NEXT: ret void
1104 //
1105 //
1106 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
1107 // CHECK3-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1108 // CHECK3-NEXT: entry:
1109 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
1110 // CHECK3-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
1111 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
1112 // CHECK3-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
1113 // CHECK3-NEXT: ret void
1114 //
1115 //
1116 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
1117 // CHECK3-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1118 // CHECK3-NEXT: entry:
1119 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
1120 // CHECK3-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
1121 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
1122 // CHECK3-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1123 // CHECK3-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
1124 // CHECK3-NEXT: store i32 [[TMP0]], i32* [[F]], align 4
1125 // CHECK3-NEXT: ret void
1126 //
1127 //
1128 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
1129 // CHECK3-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1130 // CHECK3-NEXT: entry:
1131 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
1132 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
1133 // CHECK3-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
1134 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
1135 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
1136 // CHECK3-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1137 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1138 // CHECK3-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
1139 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]]
1140 // CHECK3-NEXT: store i32 [[ADD]], i32* [[F]], align 4
1141 // CHECK3-NEXT: ret void
1142 //
1143 //
1144 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
1145 // CHECK3-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1146 // CHECK3-NEXT: entry:
1147 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 4
1148 // CHECK3-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 4
1149 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 4
1150 // CHECK3-NEXT: ret void
1151 //
1152 //
1153 // CHECK3-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_teams_distribute_private_codegen.cpp
1154 // CHECK3-SAME: () #[[ATTR0]] {
1155 // CHECK3-NEXT: entry:
1156 // CHECK3-NEXT: call void @__cxx_global_var_init()
1157 // CHECK3-NEXT: call void @__cxx_global_var_init.1()
1158 // CHECK3-NEXT: call void @__cxx_global_var_init.2()
1159 // CHECK3-NEXT: ret void
1160 //
1161 //
1162 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1163 // CHECK3-SAME: () #[[ATTR0]] {
1164 // CHECK3-NEXT: entry:
1165 // CHECK3-NEXT: call void @__tgt_register_requires(i64 1)
1166 // CHECK3-NEXT: ret void
1167 //
1168 //
1169 // CHECK9-LABEL: define {{[^@]+}}@__cxx_global_var_init
1170 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
1171 // CHECK9-NEXT: entry:
1172 // CHECK9-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) @test)
1173 // CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
1174 // CHECK9-NEXT: ret void
1175 //
1176 //
1177 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
1178 // CHECK9-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
1179 // CHECK9-NEXT: entry:
1180 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1181 // CHECK9-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1182 // CHECK9-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1183 // CHECK9-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
1184 // CHECK9-NEXT: ret void
1185 //
1186 //
1187 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
1188 // CHECK9-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1189 // CHECK9-NEXT: entry:
1190 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1191 // CHECK9-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1192 // CHECK9-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1193 // CHECK9-NEXT: call void @_ZN1SIfED2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR2]]
1194 // CHECK9-NEXT: ret void
1195 //
1196 //
1197 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
1198 // CHECK9-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1199 // CHECK9-NEXT: entry:
1200 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1201 // CHECK9-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1202 // CHECK9-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1203 // CHECK9-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1204 // CHECK9-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
1205 // CHECK9-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
1206 // CHECK9-NEXT: store float [[CONV]], float* [[F]], align 4
1207 // CHECK9-NEXT: ret void
1208 //
1209 //
1210 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
1211 // CHECK9-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1212 // CHECK9-NEXT: entry:
1213 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1214 // CHECK9-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1215 // CHECK9-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1216 // CHECK9-NEXT: ret void
1217 //
1218 //
1219 // CHECK9-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
1220 // CHECK9-SAME: () #[[ATTR0]] {
1221 // CHECK9-NEXT: entry:
1222 // CHECK9-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float noundef 1.000000e+00)
1223 // CHECK9-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float noundef 2.000000e+00)
1224 // CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
1225 // CHECK9-NEXT: ret void
1226 //
1227 //
1228 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
1229 // CHECK9-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1230 // CHECK9-NEXT: entry:
1231 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1232 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
1233 // CHECK9-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1234 // CHECK9-NEXT: store float [[A]], float* [[A_ADDR]], align 4
1235 // CHECK9-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1236 // CHECK9-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1237 // CHECK9-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]])
1238 // CHECK9-NEXT: ret void
1239 //
1240 //
1241 // CHECK9-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
1242 // CHECK9-SAME: (i8* noundef [[TMP0:%.*]]) #[[ATTR0]] {
1243 // CHECK9-NEXT: entry:
1244 // CHECK9-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
1245 // CHECK9-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1246 // CHECK9-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
1247 // CHECK9: arraydestroy.body:
1248 // CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 1, i64 0), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1249 // CHECK9-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1250 // CHECK9-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
1251 // CHECK9-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
1252 // CHECK9-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1253 // CHECK9: arraydestroy.done1:
1254 // CHECK9-NEXT: ret void
1255 //
1256 //
1257 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
1258 // CHECK9-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
1259 // CHECK9-NEXT: entry:
1260 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1261 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
1262 // CHECK9-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1263 // CHECK9-NEXT: store float [[A]], float* [[A_ADDR]], align 4
1264 // CHECK9-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1265 // CHECK9-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1266 // CHECK9-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
1267 // CHECK9-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
1268 // CHECK9-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
1269 // CHECK9-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
1270 // CHECK9-NEXT: store float [[ADD]], float* [[F]], align 4
1271 // CHECK9-NEXT: ret void
1272 //
1273 //
1274 // CHECK9-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
1275 // CHECK9-SAME: () #[[ATTR0]] {
1276 // CHECK9-NEXT: entry:
1277 // CHECK9-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef nonnull align 4 dereferenceable(4) @var, float noundef 3.000000e+00)
1278 // CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
1279 // CHECK9-NEXT: ret void
1280 //
1281 //
1282 // CHECK9-LABEL: define {{[^@]+}}@main
1283 // CHECK9-SAME: () #[[ATTR3:[0-9]+]] {
1284 // CHECK9-NEXT: entry:
1285 // CHECK9-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
1286 // CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
1287 // CHECK9-NEXT: store i32 0, i32* [[RETVAL]], align 4
1288 // CHECK9-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
1289 // CHECK9-NEXT: ret i32 0
1290 //
1291 //
1292 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l75
1293 // CHECK9-SAME: (i64 noundef [[G1:%.*]]) #[[ATTR5:[0-9]+]] {
1294 // CHECK9-NEXT: entry:
1295 // CHECK9-NEXT: [[G1_ADDR:%.*]] = alloca i64, align 8
1296 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32*, align 8
1297 // CHECK9-NEXT: store i64 [[G1]], i64* [[G1_ADDR]], align 8
1298 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[G1_ADDR]] to i32*
1299 // CHECK9-NEXT: store i32* [[CONV]], i32** [[TMP]], align 8
1300 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
1301 // CHECK9-NEXT: ret void
1302 //
1303 //
1304 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
1305 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR5]] {
1306 // CHECK9-NEXT: entry:
1307 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1308 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1309 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1310 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
1311 // CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
1312 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1313 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1314 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1315 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1316 // CHECK9-NEXT: [[G:%.*]] = alloca i32, align 4
1317 // CHECK9-NEXT: [[G1:%.*]] = alloca i32, align 4
1318 // CHECK9-NEXT: [[_TMP2:%.*]] = alloca i32*, align 8
1319 // CHECK9-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
1320 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
1321 // CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
1322 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1323 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1324 // CHECK9-NEXT: store i32* undef, i32** [[_TMP1]], align 8
1325 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
1326 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
1327 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1328 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1329 // CHECK9-NEXT: store i32* [[G1]], i32** [[_TMP2]], align 8
1330 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1331 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1332 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1333 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1334 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1
1335 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1336 // CHECK9: cond.true:
1337 // CHECK9-NEXT: br label [[COND_END:%.*]]
1338 // CHECK9: cond.false:
1339 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1340 // CHECK9-NEXT: br label [[COND_END]]
1341 // CHECK9: cond.end:
1342 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
1343 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1344 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1345 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
1346 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1347 // CHECK9: omp.inner.for.cond:
1348 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1349 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1350 // CHECK9-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
1351 // CHECK9-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1352 // CHECK9: omp.inner.for.body:
1353 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1354 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
1355 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1356 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4
1357 // CHECK9-NEXT: store i32 1, i32* [[G]], align 4
1358 // CHECK9-NEXT: [[TMP8:%.*]] = load i32*, i32** [[_TMP2]], align 8
1359 // CHECK9-NEXT: store volatile i32 1, i32* [[TMP8]], align 4
1360 // CHECK9-NEXT: store i32 2, i32* [[SIVAR]], align 4
1361 // CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
1362 // CHECK9-NEXT: store i32* [[G]], i32** [[TMP9]], align 8
1363 // CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
1364 // CHECK9-NEXT: [[TMP11:%.*]] = load i32*, i32** [[_TMP2]], align 8
1365 // CHECK9-NEXT: store i32* [[TMP11]], i32** [[TMP10]], align 8
1366 // CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
1367 // CHECK9-NEXT: store i32* [[SIVAR]], i32** [[TMP12]], align 8
1368 // CHECK9-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(24) [[REF_TMP]])
1369 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1370 // CHECK9: omp.body.continue:
1371 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1372 // CHECK9: omp.inner.for.inc:
1373 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1374 // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
1375 // CHECK9-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
1376 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
1377 // CHECK9: omp.inner.for.end:
1378 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1379 // CHECK9: omp.loop.exit:
1380 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1381 // CHECK9-NEXT: ret void
1382 //
1383 //
1384 // CHECK9-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_teams_distribute_private_codegen.cpp
1385 // CHECK9-SAME: () #[[ATTR0]] {
1386 // CHECK9-NEXT: entry:
1387 // CHECK9-NEXT: call void @__cxx_global_var_init()
1388 // CHECK9-NEXT: call void @__cxx_global_var_init.1()
1389 // CHECK9-NEXT: call void @__cxx_global_var_init.2()
1390 // CHECK9-NEXT: ret void
1391 //
1392 //
1393 // CHECK9-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1394 // CHECK9-SAME: () #[[ATTR0]] {
1395 // CHECK9-NEXT: entry:
1396 // CHECK9-NEXT: call void @__tgt_register_requires(i64 1)
1397 // CHECK9-NEXT: ret void
1398 //
1399