// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3

// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK7

// RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
// RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9

// RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
// RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11

// expected-no-diagnostics
#ifndef HEADER
#define HEADER

template <typename T>
T tmain() {
  T t_var = T();
  T vec[] = {1, 2};
#pragma omp target teams distribute simd reduction(+: t_var)
  for (int i = 0; i < 2; ++i) {
    t_var += (T) i;
  }
  return T();
}

int main() {
  static int sivar;
#ifdef LAMBDA

  [&]() {
#pragma omp target teams distribute simd reduction(+: sivar)
  for (int i = 0; i < 2; ++i) {

    // Skip global and bound tid vars

    sivar += i;

    [&]() {

      sivar += 4;

    }();
  }
  }();
  return 0;
#else
#pragma omp target teams distribute simd reduction(+: sivar)
  for (int i = 0; i < 2; ++i) {
    sivar += i;
  }
  return tmain<int>();
#endif
}




// Skip global and bound tid vars




// Skip global and bound tid vars


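// A rough road map of the IR asserted below, for readers skimming the
// autogenerated checks: on the host, each target region packs its captured
// pointer into the __tgt_kernel_arguments struct and calls
// __tgt_target_kernel, falling back to the host outlined copy when the
// offload fails. The outlined teams function privatizes the reduction
// variable, runs the statically scheduled simd loop, then merges the partial
// result through __kmpc_reduce (case 1: plain add plus __kmpc_end_reduce;
// case 2: atomicrmw add). The CHECK lines are produced by
// utils/update_cc_test_checks.py and should be regenerated, not edited by
// hand.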
#endif
// CHECK1-LABEL: define {{[^@]+}}@main
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
// CHECK1-NEXT:    store i32* @_ZZ4mainE5sivar, i32** [[TMP1]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
// CHECK1-NEXT:    store i32* @_ZZ4mainE5sivar, i32** [[TMP3]], align 8
// CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT:    store i8* null, i8** [[TMP4]], align 8
// CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT:    store i32 1, i32* [[TMP7]], align 4
// CHECK1-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT:    store i32 1, i32* [[TMP8]], align 4
// CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT:    store i8** [[TMP5]], i8*** [[TMP9]], align 8
// CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT:    store i8** [[TMP6]], i8*** [[TMP10]], align 8
// CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP11]], align 8
// CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP12]], align 8
// CHECK1-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT:    store i8** null, i8*** [[TMP13]], align 8
// CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT:    store i8** null, i8*** [[TMP14]], align 8
// CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT:    store i64 2, i64* [[TMP15]], align 8
// CHECK1-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK1-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1:       omp_offload.failed:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60(i32* @_ZZ4mainE5sivar) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK1:       omp_offload.cont:
// CHECK1-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v()
// CHECK1-NEXT:    ret i32 [[CALL]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60
// CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[TMP0]])
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK1-NEXT:    store i32 0, i32* [[SIVAR1]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1:       cond.true:
// CHECK1-NEXT:    br label [[COND_END:%.*]]
// CHECK1:       cond.false:
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    br label [[COND_END]]
// CHECK1:       cond.end:
// CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1:       omp.inner.for.cond:
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !5
// CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1:       omp.inner.for.body:
// CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !5
// CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
// CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[SIVAR1]], align 4, !llvm.access.group !5
// CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK1-NEXT:    store i32 [[ADD3]], i32* [[SIVAR1]], align 4, !llvm.access.group !5
// CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1:       omp.body.continue:
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1:       omp.inner.for.inc:
// CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK1-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK1:       omp.inner.for.end:
// CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1:       omp.loop.exit:
// CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK1-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1:       .omp.final.then:
// CHECK1-NEXT:    store i32 2, i32* [[I]], align 4
// CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK1:       .omp.final.done:
// CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT:    [[TMP15:%.*]] = bitcast i32* [[SIVAR1]] to i8*
// CHECK1-NEXT:    store i8* [[TMP15]], i8** [[TMP14]], align 8
// CHECK1-NEXT:    [[TMP16:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, i8* [[TMP16]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT:    switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT:    ]
// CHECK1:       .omp.reduction.case1:
// CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK1-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK1-NEXT:    store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1:       .omp.reduction.case2:
// CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK1-NEXT:    [[TMP21:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP20]] monotonic, align 4
// CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1:       .omp.reduction.default:
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK1-SAME: () #[[ATTR6:[0-9]+]] comdat {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store i32 0, i32* [[T_VAR]], align 4
// CHECK1-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
// CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP2:%.*]] = bitcast i8** [[TMP1]] to i32**
// CHECK1-NEXT:    store i32* [[T_VAR]], i32** [[TMP2]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK1-NEXT:    store i32* [[T_VAR]], i32** [[TMP4]], align 8
// CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT:    store i8* null, i8** [[TMP5]], align 8
// CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT:    store i32 1, i32* [[TMP8]], align 4
// CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT:    store i32 1, i32* [[TMP9]], align 4
// CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT:    store i8** [[TMP6]], i8*** [[TMP10]], align 8
// CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT:    store i8** [[TMP7]], i8*** [[TMP11]], align 8
// CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.3, i32 0, i32 0), i64** [[TMP12]], align 8
// CHECK1-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i64** [[TMP13]], align 8
// CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT:    store i8** null, i8*** [[TMP14]], align 8
// CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT:    store i8** null, i8*** [[TMP15]], align 8
// CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT:    store i64 2, i64* [[TMP16]], align 8
// CHECK1-NEXT:    [[TMP17:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK1-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
// CHECK1-NEXT:    br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1:       omp_offload.failed:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i32* [[T_VAR]]) #[[ATTR3]]
// CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK1:       omp_offload.cont:
// CHECK1-NEXT:    ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32
// CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[TMP0]])
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT:    store i32 0, i32* [[T_VAR1]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1:       cond.true:
// CHECK1-NEXT:    br label [[COND_END:%.*]]
// CHECK1:       cond.false:
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    br label [[COND_END]]
// CHECK1:       cond.end:
// CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1:       omp.inner.for.cond:
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1:       omp.inner.for.body:
// CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
// CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !11
// CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK1-NEXT:    store i32 [[ADD3]], i32* [[T_VAR1]], align 4, !llvm.access.group !11
// CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1:       omp.body.continue:
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1:       omp.inner.for.inc:
// CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK1-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK1:       omp.inner.for.end:
// CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1:       omp.loop.exit:
// CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK1-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1:       .omp.final.then:
// CHECK1-NEXT:    store i32 2, i32* [[I]], align 4
// CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK1:       .omp.final.done:
// CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT:    [[TMP15:%.*]] = bitcast i32* [[T_VAR1]] to i8*
// CHECK1-NEXT:    store i8* [[TMP15]], i8** [[TMP14]], align 8
// CHECK1-NEXT:    [[TMP16:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, i8* [[TMP16]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT:    switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT:    ]
// CHECK1:       .omp.reduction.case1:
// CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK1-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK1-NEXT:    store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1:       .omp.reduction.case2:
// CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK1-NEXT:    [[TMP21:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP20]] monotonic, align 4
// CHECK1-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1:       .omp.reduction.default:
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK1-SAME: () #[[ATTR8:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
// CHECK1-NEXT:    ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@main
// CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
// CHECK3-NEXT:    store i32* @_ZZ4mainE5sivar, i32** [[TMP1]], align 4
// CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
// CHECK3-NEXT:    store i32* @_ZZ4mainE5sivar, i32** [[TMP3]], align 4
// CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT:    store i8* null, i8** [[TMP4]], align 4
// CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT:    store i32 1, i32* [[TMP7]], align 4
// CHECK3-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT:    store i32 1, i32* [[TMP8]], align 4
// CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT:    store i8** [[TMP5]], i8*** [[TMP9]], align 4
// CHECK3-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT:    store i8** [[TMP6]], i8*** [[TMP10]], align 4
// CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP11]], align 4
// CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP12]], align 4
// CHECK3-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT:    store i8** null, i8*** [[TMP13]], align 4
// CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT:    store i8** null, i8*** [[TMP14]], align 4
// CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT:    store i64 2, i64* [[TMP15]], align 8
// CHECK3-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK3-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK3-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3:       omp_offload.failed:
// CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60(i32* @_ZZ4mainE5sivar) #[[ATTR3:[0-9]+]]
// CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK3:       omp_offload.cont:
// CHECK3-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
// CHECK3-NEXT:    ret i32 [[CALL]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60
// CHECK3-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 4
// CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 4
// CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[TMP0]])
// CHECK3-NEXT:    ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT:    [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 4
// CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 4
// CHECK3-NEXT:    store i32 0, i32* [[SIVAR1]], align 4
// CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3:       cond.true:
// CHECK3-NEXT:    br label [[COND_END:%.*]]
// CHECK3:       cond.false:
// CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT:    br label [[COND_END]]
// CHECK3:       cond.end:
// CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3:       omp.inner.for.cond:
// CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6
// CHECK3-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3:       omp.inner.for.body:
// CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6
// CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
// CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[SIVAR1]], align 4, !llvm.access.group !6
// CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK3-NEXT:    store i32 [[ADD3]], i32* [[SIVAR1]], align 4, !llvm.access.group !6
// CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3:       omp.body.continue:
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3:       omp.inner.for.inc:
// CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK3-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK3:       omp.inner.for.end:
// CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3:       omp.loop.exit:
// CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK3-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3:       .omp.final.then:
// CHECK3-NEXT:    store i32 2, i32* [[I]], align 4
// CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK3:       .omp.final.done:
// CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP15:%.*]] = bitcast i32* [[SIVAR1]] to i8*
// CHECK3-NEXT:    store i8* [[TMP15]], i8** [[TMP14]], align 4
// CHECK3-NEXT:    [[TMP16:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i32 4, i8* [[TMP16]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT:    switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK3-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK3-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK3-NEXT:    ]
// CHECK3:       .omp.reduction.case1:
// CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK3-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK3-NEXT:    store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK3-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3:       .omp.reduction.case2:
// CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK3-NEXT:    [[TMP21:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP20]] monotonic, align 4
// CHECK3-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3:       .omp.reduction.default:
// CHECK3-NEXT:    ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK3-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK3-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK3-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK3-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK3-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK3-NEXT:    ret void
//
635 //
636 // CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
637 // CHECK3-SAME: () #[[ATTR6:[0-9]+]] comdat {
638 // CHECK3-NEXT:  entry:
639 // CHECK3-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
640 // CHECK3-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
641 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
642 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
643 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
644 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
645 // CHECK3-NEXT:    store i32 0, i32* [[T_VAR]], align 4
646 // CHECK3-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
647 // CHECK3-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
648 // CHECK3-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
649 // CHECK3-NEXT:    [[TMP2:%.*]] = bitcast i8** [[TMP1]] to i32**
650 // CHECK3-NEXT:    store i32* [[T_VAR]], i32** [[TMP2]], align 4
651 // CHECK3-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
652 // CHECK3-NEXT:    [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
653 // CHECK3-NEXT:    store i32* [[T_VAR]], i32** [[TMP4]], align 4
654 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
655 // CHECK3-NEXT:    store i8* null, i8** [[TMP5]], align 4
656 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
657 // CHECK3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
658 // CHECK3-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
659 // CHECK3-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
660 // CHECK3-NEXT:    store i32 1, i32* [[TMP8]], align 4
661 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
662 // CHECK3-NEXT:    store i32 1, i32* [[TMP9]], align 4
663 // CHECK3-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
664 // CHECK3-NEXT:    store i8** [[TMP6]], i8*** [[TMP10]], align 4
665 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
666 // CHECK3-NEXT:    store i8** [[TMP7]], i8*** [[TMP11]], align 4
667 // CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
668 // CHECK3-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.3, i32 0, i32 0), i64** [[TMP12]], align 4
669 // CHECK3-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
670 // CHECK3-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i64** [[TMP13]], align 4
671 // CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
672 // CHECK3-NEXT:    store i8** null, i8*** [[TMP14]], align 4
673 // CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
674 // CHECK3-NEXT:    store i8** null, i8*** [[TMP15]], align 4
675 // CHECK3-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
676 // CHECK3-NEXT:    store i64 2, i64* [[TMP16]], align 8
677 // CHECK3-NEXT:    [[TMP17:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 -1, i32 0, i32 1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
678 // CHECK3-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0
679 // CHECK3-NEXT:    br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
680 // CHECK3:       omp_offload.failed:
681 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i32* [[T_VAR]]) #[[ATTR3]]
682 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
683 // CHECK3:       omp_offload.cont:
684 // CHECK3-NEXT:    ret i32 0
685 //
686 //
687 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32
688 // CHECK3-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
689 // CHECK3-NEXT:  entry:
690 // CHECK3-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 4
691 // CHECK3-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 4
692 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 4
693 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[TMP0]])
694 // CHECK3-NEXT:    ret void
695 //
696 //
697 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
698 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR2]] {
699 // CHECK3-NEXT:  entry:
700 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
701 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT:    store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 4
// CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 4
// CHECK3-NEXT:    store i32 0, i32* [[T_VAR1]], align 4
// CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3:       cond.true:
// CHECK3-NEXT:    br label [[COND_END:%.*]]
// CHECK3:       cond.false:
// CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT:    br label [[COND_END]]
// CHECK3:       cond.end:
// CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3:       omp.inner.for.cond:
// CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
// CHECK3-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3:       omp.inner.for.body:
// CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
// CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12
// CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !12
// CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK3-NEXT:    store i32 [[ADD3]], i32* [[T_VAR1]], align 4, !llvm.access.group !12
// CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3:       omp.body.continue:
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3:       omp.inner.for.inc:
// CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK3-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK3:       omp.inner.for.end:
// CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3:       omp.loop.exit:
// CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK3-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3:       .omp.final.then:
// CHECK3-NEXT:    store i32 2, i32* [[I]], align 4
// CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK3:       .omp.final.done:
// CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP15:%.*]] = bitcast i32* [[T_VAR1]] to i8*
// CHECK3-NEXT:    store i8* [[TMP15]], i8** [[TMP14]], align 4
// CHECK3-NEXT:    [[TMP16:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1, i32 4, i8* [[TMP16]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT:    switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK3-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK3-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK3-NEXT:    ]
// CHECK3:       .omp.reduction.case1:
// CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK3-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK3-NEXT:    store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK3-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3:       .omp.reduction.case2:
// CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK3-NEXT:    [[TMP21:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP20]] monotonic, align 4
// CHECK3-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3:       .omp.reduction.default:
// CHECK3-NEXT:    ret void
//
//
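// Reduction combiner for the 32-bit variant (effectively lhs[0] += rhs[0]):
// both i8* arguments are cast back to [1 x i8*] red-lists before the private
// t_var copies are loaded and summed.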
// CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK3-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK3-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK3-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK3-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK3-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK3-NEXT:    ret void
//
//
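// Registration stub passing the requires-clause bitmask to the offload
// runtime via __tgt_register_requires; this is typically wired up through a
// global constructor.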
// CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK3-SAME: () #[[ATTR8:[0-9]+]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    call void @__tgt_register_requires(i64 1)
// CHECK3-NEXT:    ret void
//
//
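// In the CHECK5 expectations @main contains no OpenMP runtime calls: the loop
// body is emitted inline under !llvm.access.group metadata and the local
// sivar copy is folded into @_ZZ4mainE5sivar with a plain load/add/store
// sequence.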
// CHECK5-LABEL: define {{[^@]+}}@main
// CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[SIVAR:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT:    store i32 0, i32* [[SIVAR]], align 4
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5:       omp.inner.for.cond:
// CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
// CHECK5-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5:       omp.inner.for.body:
// CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !2
// CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !2
// CHECK5-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]]
// CHECK5-NEXT:    store i32 [[ADD1]], i32* [[SIVAR]], align 4, !llvm.access.group !2
// CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5:       omp.body.continue:
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5:       omp.inner.for.inc:
// CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK5-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK5-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK5:       omp.inner.for.end:
// CHECK5-NEXT:    store i32 2, i32* [[I]], align 4
// CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
// CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[SIVAR]], align 4
// CHECK5-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
// CHECK5-NEXT:    store i32 [[ADD3]], i32* @_ZZ4mainE5sivar, align 4
// CHECK5-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v()
// CHECK5-NEXT:    ret i32 [[CALL]]
//
//
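// The template instantiation gets the same inline lowering: t_var1 is the
// private reduction copy, merged back into t_var once the loop finishes.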
// CHECK5-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK5-SAME: () #[[ATTR1:[0-9]+]] comdat {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    store i32 0, i32* [[T_VAR]], align 4
// CHECK5-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT:    store i32 0, i32* [[T_VAR1]], align 4
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5:       omp.inner.for.cond:
// CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6
// CHECK5-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5:       omp.inner.for.body:
// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6
// CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
// CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !6
// CHECK5-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]]
// CHECK5-NEXT:    store i32 [[ADD2]], i32* [[T_VAR1]], align 4, !llvm.access.group !6
// CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5:       omp.body.continue:
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5:       omp.inner.for.inc:
// CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK5-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK5-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK5:       omp.inner.for.end:
// CHECK5-NEXT:    store i32 2, i32* [[I]], align 4
// CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK5-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK5-NEXT:    store i32 [[ADD4]], i32* [[T_VAR]], align 4
// CHECK5-NEXT:    ret i32 0
//
//
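// CHECK7 repeats the CHECK5 expectations for a 32-bit target: the call to
// @_Z5tmainIiET_v carries no `signext` on its result, and the access-group
// and loop metadata IDs shift to !3 and !4.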
// CHECK7-LABEL: define {{[^@]+}}@main
// CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK7-NEXT:  entry:
// CHECK7-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT:    [[SIVAR:%.*]] = alloca i32, align 4
// CHECK7-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT:    store i32 0, i32* [[SIVAR]], align 4
// CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7:       omp.inner.for.cond:
// CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
// CHECK7-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7:       omp.inner.for.body:
// CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
// CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !3
// CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !3
// CHECK7-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]]
// CHECK7-NEXT:    store i32 [[ADD1]], i32* [[SIVAR]], align 4, !llvm.access.group !3
// CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7:       omp.body.continue:
// CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7:       omp.inner.for.inc:
// CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK7-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK7:       omp.inner.for.end:
// CHECK7-NEXT:    store i32 2, i32* [[I]], align 4
// CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
// CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[SIVAR]], align 4
// CHECK7-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
// CHECK7-NEXT:    store i32 [[ADD3]], i32* @_ZZ4mainE5sivar, align 4
// CHECK7-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
// CHECK7-NEXT:    ret i32 [[CALL]]
//
//
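// The instantiation matches its CHECK5 counterpart except for the 32-bit
// memcpy intrinsic (@llvm.memcpy.p0i8.p0i8.i32) used for the vec initializer.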
// CHECK7-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK7-SAME: () #[[ATTR1:[0-9]+]] comdat {
// CHECK7-NEXT:  entry:
// CHECK7-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
// CHECK7-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT:    [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK7-NEXT:    store i32 0, i32* [[T_VAR]], align 4
// CHECK7-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
// CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT:    store i32 0, i32* [[T_VAR1]], align 4
// CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7:       omp.inner.for.cond:
// CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !7
// CHECK7-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7:       omp.inner.for.body:
// CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !7
// CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !7
// CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !7
// CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]]
// CHECK7-NEXT:    store i32 [[ADD2]], i32* [[T_VAR1]], align 4, !llvm.access.group !7
// CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7:       omp.body.continue:
// CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7:       omp.inner.for.inc:
// CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK7-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK7-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK7:       omp.inner.for.end:
// CHECK7-NEXT:    store i32 2, i32* [[I]], align 4
// CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK7-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK7-NEXT:    store i32 [[ADD4]], i32* [[T_VAR]], align 4
// CHECK7-NEXT:    ret i32 0
//
//
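// CHECK9 covers the lambda build: @main only invokes the "$_0" call operator,
// and the real expectations sit in the offload entry and outlined function
// that follow.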
// CHECK9-LABEL: define {{[^@]+}}@main
// CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
// CHECK9-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK9-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
// CHECK9-NEXT:    ret i32 0
//
//
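// Offload entry for the target region (the _l44 suffix encodes the source
// line): it reloads the captured sivar pointer and hands it to
// __kmpc_fork_teams together with the outlined function.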
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44
// CHECK9-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[TMP0]])
// CHECK9-NEXT:    ret void
//
//
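// Outlined teams body: sivar is privatized into [[SIVAR1]], the loop runs
// under schedule kind 92 (distribute static) inside an access group, and the
// epilogue performs the __kmpc_reduce handshake with both the non-atomic
// (case 1) and atomicrmw (case 2) paths.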
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT:    [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK9-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT:    store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK9-NEXT:    store i32 0, i32* [[SIVAR1]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK9-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9:       cond.true:
// CHECK9-NEXT:    br label [[COND_END:%.*]]
// CHECK9:       cond.false:
// CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    br label [[COND_END]]
// CHECK9:       cond.end:
// CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9:       omp.inner.for.cond:
// CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !4
// CHECK9-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK9-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9:       omp.inner.for.body:
// CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !4
// CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !4
// CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[SIVAR1]], align 4, !llvm.access.group !4
// CHECK9-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK9-NEXT:    store i32 [[ADD3]], i32* [[SIVAR1]], align 4, !llvm.access.group !4
// CHECK9-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK9-NEXT:    store i32* [[SIVAR1]], i32** [[TMP11]], align 8, !llvm.access.group !4
// CHECK9-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[REF_TMP]]), !llvm.access.group !4
// CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9:       omp.body.continue:
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9:       omp.inner.for.inc:
// CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK9-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK9:       omp.inner.for.end:
// CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9:       omp.loop.exit:
// CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK9-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK9:       .omp.final.then:
// CHECK9-NEXT:    store i32 2, i32* [[I]], align 4
// CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK9:       .omp.final.done:
// CHECK9-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK9-NEXT:    [[TMP16:%.*]] = bitcast i32* [[SIVAR1]] to i8*
// CHECK9-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8
// CHECK9-NEXT:    [[TMP17:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK9-NEXT:    [[TMP18:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, i8* [[TMP17]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT:    switch i32 [[TMP18]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK9-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK9-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK9-NEXT:    ]
// CHECK9:       .omp.reduction.case1:
// CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK9-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK9-NEXT:    store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK9-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9:       .omp.reduction.case2:
// CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK9-NEXT:    [[TMP22:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP21]] monotonic, align 4
// CHECK9-NEXT:    call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9:       .omp.reduction.default:
// CHECK9-NEXT:    ret void
//
//
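// Same combiner shape as the CHECK3 version above, but with a 64-bit layout:
// align-8 loads/stores and i64 indices into the [1 x i8*] red-list.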
// CHECK9-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK9-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK9-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK9-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK9-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK9-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK9-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK9-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK9-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK9-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK9-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK9-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK9-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK9-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK9-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK9-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK9-NEXT:    ret void
//
//
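// Same `requires` registration as in the CHECK3 block above.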
// CHECK9-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK9-SAME: () #[[ATTR7:[0-9]+]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    call void @__tgt_register_requires(i64 1)
// CHECK9-NEXT:    ret void
//
//
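// The CHECK11 expectations for @main match the CHECK9 entry point: no runtime
// calls here, just the direct invocation of the lambda's call operator.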
// CHECK11-LABEL: define {{[^@]+}}@main
// CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK11-NEXT:  entry:
// CHECK11-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
// CHECK11-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK11-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
// CHECK11-NEXT:    ret i32 0
//
