1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // expected-no-diagnostics
3 #ifndef HEADER
4 #define HEADER
5 
6 // Test host codegen.
7 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
8 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
9 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
10 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
11 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
12 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
13 
14 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
15 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
16 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
17 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
18 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
19 // RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
20 #ifdef CK1
21 
22 template <typename T, int X, long long Y>
23 struct SS{
24   T a[X][Y];
25 
26   int foo(void) {
27 
28     #pragma omp target teams distribute collapse(2)
29     for(int i = 0; i < X; i++) {
30       for(int j = 0; j < Y; j++) {
31         a[i][j] = (T)0;
32       }
33     }
34 
35     // discard loop variables not needed here
36 
37     return a[0][0];
38   }
39 };
40 
teams_template_struct(void)41 int teams_template_struct(void) {
42   SS<int, 123, 456> V;
43   return V.foo();
44 
45 }
46 #endif // CK1
47 
48 // Test host codegen.
49 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
50 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
51 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9
52 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
53 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
54 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
55 
56 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
57 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
58 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
59 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
60 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
61 // RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
62 #ifdef CK2
63 
64 template <typename T, int n, int m>
65 int tmain(T argc) {
66   T a[n][m];
67   #pragma omp target teams distribute collapse(2)
68   for(int i = 0; i < n; i++) {
69     for(int j = 0; j < m; j++) {
70       a[i][j] = (T)0;
71     }
72   }
73   return 0;
74 }
75 
main(int argc,char ** argv)76 int main (int argc, char **argv) {
77   int n = 100;
78   int m = 2;
79   int a[n][m];
80   #pragma omp target teams distribute collapse(2)
81   for(int i = 0; i < n; i++) {
82     for(int j = 0; j < m; j++) {
83       a[i][j] = 0;
84     }
85   }
86   return tmain<int, 10, 2>(argc);
87 }
88 
89 
90 
91 
92 
93 // discard loop variables not needed here
94 
95 #endif // CK2
96 #endif // #ifndef HEADER
97 // CHECK1-LABEL: define {{[^@]+}}@_Z21teams_template_structv
98 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
99 // CHECK1-NEXT:  entry:
100 // CHECK1-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
101 // CHECK1-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* noundef nonnull align 4 dereferenceable(224352) [[V]])
102 // CHECK1-NEXT:    ret i32 [[CALL]]
103 //
104 //
105 // CHECK1-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
106 // CHECK1-SAME: (%struct.SS* noundef nonnull align 4 dereferenceable(224352) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
107 // CHECK1-NEXT:  entry:
108 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
109 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
110 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
111 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
112 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
113 // CHECK1-NEXT:    [[_TMP2:%.*]] = alloca i32, align 4
114 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
115 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
116 // CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
117 // CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
118 // CHECK1-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
119 // CHECK1-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 8
120 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
121 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x [456 x i32]]**
122 // CHECK1-NEXT:    store [123 x [456 x i32]]* [[A]], [123 x [456 x i32]]** [[TMP3]], align 8
123 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
124 // CHECK1-NEXT:    store i8* null, i8** [[TMP4]], align 8
125 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
126 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
127 // CHECK1-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
128 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
129 // CHECK1-NEXT:    store i32 1, i32* [[TMP7]], align 4
130 // CHECK1-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
131 // CHECK1-NEXT:    store i32 1, i32* [[TMP8]], align 4
132 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
133 // CHECK1-NEXT:    store i8** [[TMP5]], i8*** [[TMP9]], align 8
134 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
135 // CHECK1-NEXT:    store i8** [[TMP6]], i8*** [[TMP10]], align 8
136 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
137 // CHECK1-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP11]], align 8
138 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
139 // CHECK1-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP12]], align 8
140 // CHECK1-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
141 // CHECK1-NEXT:    store i8** null, i8*** [[TMP13]], align 8
142 // CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
143 // CHECK1-NEXT:    store i8** null, i8*** [[TMP14]], align 8
144 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
145 // CHECK1-NEXT:    store i64 56088, i64* [[TMP15]], align 8
146 // CHECK1-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
147 // CHECK1-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
148 // CHECK1-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
149 // CHECK1:       omp_offload.failed:
150 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
151 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
152 // CHECK1:       omp_offload.cont:
153 // CHECK1-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
154 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], [123 x [456 x i32]]* [[A3]], i64 0, i64 0
155 // CHECK1-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [456 x i32], [456 x i32]* [[ARRAYIDX]], i64 0, i64 0
156 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[ARRAYIDX4]], align 4
157 // CHECK1-NEXT:    ret i32 [[TMP18]]
158 //
159 //
160 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28
161 // CHECK1-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
162 // CHECK1-NEXT:  entry:
163 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
164 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
165 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
166 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
167 // CHECK1-NEXT:    ret void
168 //
169 //
170 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
171 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] {
172 // CHECK1-NEXT:  entry:
173 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
174 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
175 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
176 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
177 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
178 // CHECK1-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
179 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
180 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
181 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
182 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
183 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
184 // CHECK1-NEXT:    [[J:%.*]] = alloca i32, align 4
185 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
186 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
187 // CHECK1-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
188 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
189 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
190 // CHECK1-NEXT:    store i32 56087, i32* [[DOTOMP_UB]], align 4
191 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
192 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
193 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
194 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
195 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
196 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
197 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 56087
198 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
199 // CHECK1:       cond.true:
200 // CHECK1-NEXT:    br label [[COND_END:%.*]]
201 // CHECK1:       cond.false:
202 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
203 // CHECK1-NEXT:    br label [[COND_END]]
204 // CHECK1:       cond.end:
205 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 56087, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
206 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
207 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
208 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
209 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
210 // CHECK1:       omp.inner.for.cond:
211 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
212 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
213 // CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
214 // CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
215 // CHECK1:       omp.inner.for.body:
216 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
217 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 456
218 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
219 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
220 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
221 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
222 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
223 // CHECK1-NEXT:    [[DIV3:%.*]] = sdiv i32 [[TMP10]], 456
224 // CHECK1-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456
225 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL4]]
226 // CHECK1-NEXT:    [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1
227 // CHECK1-NEXT:    [[ADD6:%.*]] = add nsw i32 0, [[MUL5]]
228 // CHECK1-NEXT:    store i32 [[ADD6]], i32* [[J]], align 4
229 // CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
230 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
231 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
232 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], [123 x [456 x i32]]* [[A]], i64 0, i64 [[IDXPROM]]
233 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
234 // CHECK1-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP12]] to i64
235 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [456 x i32], [456 x i32]* [[ARRAYIDX]], i64 0, i64 [[IDXPROM7]]
236 // CHECK1-NEXT:    store i32 0, i32* [[ARRAYIDX8]], align 4
237 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
238 // CHECK1:       omp.body.continue:
239 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
240 // CHECK1:       omp.inner.for.inc:
241 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
242 // CHECK1-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
243 // CHECK1-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4
244 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
245 // CHECK1:       omp.inner.for.end:
246 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
247 // CHECK1:       omp.loop.exit:
248 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
249 // CHECK1-NEXT:    ret void
250 //
251 //
252 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
253 // CHECK1-SAME: () #[[ATTR3:[0-9]+]] {
254 // CHECK1-NEXT:  entry:
255 // CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
256 // CHECK1-NEXT:    ret void
257 //
258 //
259 // CHECK3-LABEL: define {{[^@]+}}@_Z21teams_template_structv
260 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
261 // CHECK3-NEXT:  entry:
262 // CHECK3-NEXT:    [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
263 // CHECK3-NEXT:    [[CALL:%.*]] = call noundef i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* noundef nonnull align 4 dereferenceable(224352) [[V]])
264 // CHECK3-NEXT:    ret i32 [[CALL]]
265 //
266 //
267 // CHECK3-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
268 // CHECK3-SAME: (%struct.SS* noundef nonnull align 4 dereferenceable(224352) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
269 // CHECK3-NEXT:  entry:
270 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
271 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
272 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
273 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
274 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
275 // CHECK3-NEXT:    [[_TMP2:%.*]] = alloca i32, align 4
276 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
277 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
278 // CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
279 // CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
280 // CHECK3-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
281 // CHECK3-NEXT:    store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 4
282 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
283 // CHECK3-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x [456 x i32]]**
284 // CHECK3-NEXT:    store [123 x [456 x i32]]* [[A]], [123 x [456 x i32]]** [[TMP3]], align 4
285 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
286 // CHECK3-NEXT:    store i8* null, i8** [[TMP4]], align 4
287 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
288 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
289 // CHECK3-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
290 // CHECK3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
291 // CHECK3-NEXT:    store i32 1, i32* [[TMP7]], align 4
292 // CHECK3-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
293 // CHECK3-NEXT:    store i32 1, i32* [[TMP8]], align 4
294 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
295 // CHECK3-NEXT:    store i8** [[TMP5]], i8*** [[TMP9]], align 4
296 // CHECK3-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
297 // CHECK3-NEXT:    store i8** [[TMP6]], i8*** [[TMP10]], align 4
298 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
299 // CHECK3-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP11]], align 4
300 // CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
301 // CHECK3-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP12]], align 4
302 // CHECK3-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
303 // CHECK3-NEXT:    store i8** null, i8*** [[TMP13]], align 4
304 // CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
305 // CHECK3-NEXT:    store i8** null, i8*** [[TMP14]], align 4
306 // CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
307 // CHECK3-NEXT:    store i64 56088, i64* [[TMP15]], align 8
308 // CHECK3-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
309 // CHECK3-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
310 // CHECK3-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
311 // CHECK3:       omp_offload.failed:
312 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
313 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
314 // CHECK3:       omp_offload.cont:
315 // CHECK3-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
316 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], [123 x [456 x i32]]* [[A3]], i32 0, i32 0
317 // CHECK3-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [456 x i32], [456 x i32]* [[ARRAYIDX]], i32 0, i32 0
318 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[ARRAYIDX4]], align 4
319 // CHECK3-NEXT:    ret i32 [[TMP18]]
320 //
321 //
322 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28
323 // CHECK3-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
324 // CHECK3-NEXT:  entry:
325 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
326 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
327 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
328 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
329 // CHECK3-NEXT:    ret void
330 //
331 //
332 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
333 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] {
334 // CHECK3-NEXT:  entry:
335 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
336 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
337 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
338 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
339 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
340 // CHECK3-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
341 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
342 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
343 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
344 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
345 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
346 // CHECK3-NEXT:    [[J:%.*]] = alloca i32, align 4
347 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
348 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
349 // CHECK3-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
350 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
351 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
352 // CHECK3-NEXT:    store i32 56087, i32* [[DOTOMP_UB]], align 4
353 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
354 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
355 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
356 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
357 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
358 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
359 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 56087
360 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
361 // CHECK3:       cond.true:
362 // CHECK3-NEXT:    br label [[COND_END:%.*]]
363 // CHECK3:       cond.false:
364 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
365 // CHECK3-NEXT:    br label [[COND_END]]
366 // CHECK3:       cond.end:
367 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 56087, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
368 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
369 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
370 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
371 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
372 // CHECK3:       omp.inner.for.cond:
373 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
374 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
375 // CHECK3-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
376 // CHECK3-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
377 // CHECK3:       omp.inner.for.body:
378 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
379 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 456
380 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
381 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
382 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
383 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
384 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
385 // CHECK3-NEXT:    [[DIV3:%.*]] = sdiv i32 [[TMP10]], 456
386 // CHECK3-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456
387 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL4]]
388 // CHECK3-NEXT:    [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1
389 // CHECK3-NEXT:    [[ADD6:%.*]] = add nsw i32 0, [[MUL5]]
390 // CHECK3-NEXT:    store i32 [[ADD6]], i32* [[J]], align 4
391 // CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
392 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
393 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], [123 x [456 x i32]]* [[A]], i32 0, i32 [[TMP11]]
394 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
395 // CHECK3-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds [456 x i32], [456 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP12]]
396 // CHECK3-NEXT:    store i32 0, i32* [[ARRAYIDX7]], align 4
397 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
398 // CHECK3:       omp.body.continue:
399 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
400 // CHECK3:       omp.inner.for.inc:
401 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
402 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1
403 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
404 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
405 // CHECK3:       omp.inner.for.end:
406 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
407 // CHECK3:       omp.loop.exit:
408 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
409 // CHECK3-NEXT:    ret void
410 //
411 //
412 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
413 // CHECK3-SAME: () #[[ATTR3:[0-9]+]] {
414 // CHECK3-NEXT:  entry:
415 // CHECK3-NEXT:    call void @__tgt_register_requires(i64 1)
416 // CHECK3-NEXT:    ret void
417 //
418 //
419 // CHECK9-LABEL: define {{[^@]+}}@main
420 // CHECK9-SAME: (i32 noundef signext [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
421 // CHECK9-NEXT:  entry:
422 // CHECK9-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
423 // CHECK9-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
424 // CHECK9-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
425 // CHECK9-NEXT:    [[N:%.*]] = alloca i32, align 4
426 // CHECK9-NEXT:    [[M:%.*]] = alloca i32, align 4
427 // CHECK9-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
428 // CHECK9-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
429 // CHECK9-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
430 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
431 // CHECK9-NEXT:    [[M_CASTED:%.*]] = alloca i64, align 8
432 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
433 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
434 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
435 // CHECK9-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
436 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
437 // CHECK9-NEXT:    [[_TMP2:%.*]] = alloca i32, align 4
438 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
439 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
440 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_4:%.*]] = alloca i64, align 8
441 // CHECK9-NEXT:    store i32 0, i32* [[RETVAL]], align 4
442 // CHECK9-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
443 // CHECK9-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
444 // CHECK9-NEXT:    store i32 100, i32* [[N]], align 4
445 // CHECK9-NEXT:    store i32 2, i32* [[M]], align 4
446 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
447 // CHECK9-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
448 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[M]], align 4
449 // CHECK9-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
450 // CHECK9-NEXT:    [[TMP4:%.*]] = call i8* @llvm.stacksave()
451 // CHECK9-NEXT:    store i8* [[TMP4]], i8** [[SAVED_STACK]], align 8
452 // CHECK9-NEXT:    [[TMP5:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]]
453 // CHECK9-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4
454 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
455 // CHECK9-NEXT:    store i64 [[TMP3]], i64* [[__VLA_EXPR1]], align 8
456 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N]], align 4
457 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
458 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[CONV]], align 4
459 // CHECK9-NEXT:    [[TMP7:%.*]] = load i64, i64* [[N_CASTED]], align 8
460 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[M]], align 4
461 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[M_CASTED]] to i32*
462 // CHECK9-NEXT:    store i32 [[TMP8]], i32* [[CONV1]], align 4
463 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[M_CASTED]], align 8
464 // CHECK9-NEXT:    [[TMP10:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]]
465 // CHECK9-NEXT:    [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
466 // CHECK9-NEXT:    [[TMP12:%.*]] = bitcast [5 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
467 // CHECK9-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP12]], i8* align 8 bitcast ([5 x i64]* @.offload_sizes to i8*), i64 40, i1 false)
468 // CHECK9-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
469 // CHECK9-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64*
470 // CHECK9-NEXT:    store i64 [[TMP7]], i64* [[TMP14]], align 8
471 // CHECK9-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
472 // CHECK9-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64*
473 // CHECK9-NEXT:    store i64 [[TMP7]], i64* [[TMP16]], align 8
474 // CHECK9-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
475 // CHECK9-NEXT:    store i8* null, i8** [[TMP17]], align 8
476 // CHECK9-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
477 // CHECK9-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64*
478 // CHECK9-NEXT:    store i64 [[TMP9]], i64* [[TMP19]], align 8
479 // CHECK9-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
480 // CHECK9-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64*
481 // CHECK9-NEXT:    store i64 [[TMP9]], i64* [[TMP21]], align 8
482 // CHECK9-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
483 // CHECK9-NEXT:    store i8* null, i8** [[TMP22]], align 8
484 // CHECK9-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
485 // CHECK9-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64*
486 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP24]], align 8
487 // CHECK9-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
488 // CHECK9-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64*
489 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP26]], align 8
490 // CHECK9-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
491 // CHECK9-NEXT:    store i8* null, i8** [[TMP27]], align 8
492 // CHECK9-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
493 // CHECK9-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
494 // CHECK9-NEXT:    store i64 [[TMP3]], i64* [[TMP29]], align 8
495 // CHECK9-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
496 // CHECK9-NEXT:    [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64*
497 // CHECK9-NEXT:    store i64 [[TMP3]], i64* [[TMP31]], align 8
498 // CHECK9-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
499 // CHECK9-NEXT:    store i8* null, i8** [[TMP32]], align 8
500 // CHECK9-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
501 // CHECK9-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32**
502 // CHECK9-NEXT:    store i32* [[VLA]], i32** [[TMP34]], align 8
503 // CHECK9-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
504 // CHECK9-NEXT:    [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32**
505 // CHECK9-NEXT:    store i32* [[VLA]], i32** [[TMP36]], align 8
506 // CHECK9-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
507 // CHECK9-NEXT:    store i64 [[TMP11]], i64* [[TMP37]], align 8
508 // CHECK9-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
509 // CHECK9-NEXT:    store i8* null, i8** [[TMP38]], align 8
510 // CHECK9-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
511 // CHECK9-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
512 // CHECK9-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
513 // CHECK9-NEXT:    [[TMP42:%.*]] = load i32, i32* [[N]], align 4
514 // CHECK9-NEXT:    store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4
515 // CHECK9-NEXT:    [[TMP43:%.*]] = load i32, i32* [[M]], align 4
516 // CHECK9-NEXT:    store i32 [[TMP43]], i32* [[DOTCAPTURE_EXPR_3]], align 4
517 // CHECK9-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
518 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP44]], 0
519 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
520 // CHECK9-NEXT:    [[CONV5:%.*]] = sext i32 [[DIV]] to i64
521 // CHECK9-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
522 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP45]], 0
523 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
524 // CHECK9-NEXT:    [[CONV8:%.*]] = sext i32 [[DIV7]] to i64
525 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]]
526 // CHECK9-NEXT:    [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1
527 // CHECK9-NEXT:    store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8
528 // CHECK9-NEXT:    [[TMP46:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8
529 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP46]], 1
530 // CHECK9-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
531 // CHECK9-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
532 // CHECK9-NEXT:    store i32 1, i32* [[TMP47]], align 4
533 // CHECK9-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
534 // CHECK9-NEXT:    store i32 5, i32* [[TMP48]], align 4
535 // CHECK9-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
536 // CHECK9-NEXT:    store i8** [[TMP39]], i8*** [[TMP49]], align 8
537 // CHECK9-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
538 // CHECK9-NEXT:    store i8** [[TMP40]], i8*** [[TMP50]], align 8
539 // CHECK9-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
540 // CHECK9-NEXT:    store i64* [[TMP41]], i64** [[TMP51]], align 8
541 // CHECK9-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
542 // CHECK9-NEXT:    store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP52]], align 8
543 // CHECK9-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
544 // CHECK9-NEXT:    store i8** null, i8*** [[TMP53]], align 8
545 // CHECK9-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
546 // CHECK9-NEXT:    store i8** null, i8*** [[TMP54]], align 8
547 // CHECK9-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
548 // CHECK9-NEXT:    store i64 [[ADD]], i64* [[TMP55]], align 8
549 // CHECK9-NEXT:    [[TMP56:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
550 // CHECK9-NEXT:    [[TMP57:%.*]] = icmp ne i32 [[TMP56]], 0
551 // CHECK9-NEXT:    br i1 [[TMP57]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
552 // CHECK9:       omp_offload.failed:
553 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
554 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT]]
555 // CHECK9:       omp_offload.cont:
556 // CHECK9-NEXT:    [[TMP58:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
557 // CHECK9-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP58]])
558 // CHECK9-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
559 // CHECK9-NEXT:    [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
560 // CHECK9-NEXT:    call void @llvm.stackrestore(i8* [[TMP59]])
561 // CHECK9-NEXT:    [[TMP60:%.*]] = load i32, i32* [[RETVAL]], align 4
562 // CHECK9-NEXT:    ret i32 [[TMP60]]
563 //
564 //
565 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80
566 // CHECK9-SAME: (i64 noundef [[N:%.*]], i64 noundef [[M:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
567 // CHECK9-NEXT:  entry:
568 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
569 // CHECK9-NEXT:    [[M_ADDR:%.*]] = alloca i64, align 8
570 // CHECK9-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
571 // CHECK9-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
572 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
573 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
574 // CHECK9-NEXT:    [[M_CASTED:%.*]] = alloca i64, align 8
575 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
576 // CHECK9-NEXT:    store i64 [[M]], i64* [[M_ADDR]], align 8
577 // CHECK9-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
578 // CHECK9-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
579 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
580 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
581 // CHECK9-NEXT:    [[CONV3:%.*]] = bitcast i64* [[M_ADDR]] to i32*
582 // CHECK9-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
583 // CHECK9-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
584 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8
585 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 4
586 // CHECK9-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED]] to i32*
587 // CHECK9-NEXT:    store i32 [[TMP3]], i32* [[CONV4]], align 4
588 // CHECK9-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
589 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[CONV3]], align 4
590 // CHECK9-NEXT:    [[CONV5:%.*]] = bitcast i64* [[M_CASTED]] to i32*
591 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[CONV5]], align 4
592 // CHECK9-NEXT:    [[TMP6:%.*]] = load i64, i64* [[M_CASTED]], align 8
593 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP0]], i64 [[TMP1]], i32* [[TMP2]])
594 // CHECK9-NEXT:    ret void
595 //
596 //
597 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
598 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[M:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
599 // CHECK9-NEXT:  entry:
600 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
601 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
602 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
603 // CHECK9-NEXT:    [[M_ADDR:%.*]] = alloca i64, align 8
604 // CHECK9-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
605 // CHECK9-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
606 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
607 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
608 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
609 // CHECK9-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
610 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
611 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
612 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_6:%.*]] = alloca i64, align 8
613 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
614 // CHECK9-NEXT:    [[J:%.*]] = alloca i32, align 4
615 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
616 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
617 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
618 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
619 // CHECK9-NEXT:    [[I13:%.*]] = alloca i32, align 4
620 // CHECK9-NEXT:    [[J14:%.*]] = alloca i32, align 4
621 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
622 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
623 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
624 // CHECK9-NEXT:    store i64 [[M]], i64* [[M_ADDR]], align 8
625 // CHECK9-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
626 // CHECK9-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
627 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
628 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
629 // CHECK9-NEXT:    [[CONV3:%.*]] = bitcast i64* [[M_ADDR]] to i32*
630 // CHECK9-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
631 // CHECK9-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
632 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8
633 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV]], align 4
634 // CHECK9-NEXT:    store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
635 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV3]], align 4
636 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_5]], align 4
637 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
638 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
639 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
640 // CHECK9-NEXT:    [[CONV7:%.*]] = sext i32 [[DIV]] to i64
641 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
642 // CHECK9-NEXT:    [[SUB8:%.*]] = sub nsw i32 [[TMP6]], 0
643 // CHECK9-NEXT:    [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
644 // CHECK9-NEXT:    [[CONV10:%.*]] = sext i32 [[DIV9]] to i64
645 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV7]], [[CONV10]]
646 // CHECK9-NEXT:    [[SUB11:%.*]] = sub nsw i64 [[MUL]], 1
647 // CHECK9-NEXT:    store i64 [[SUB11]], i64* [[DOTCAPTURE_EXPR_6]], align 8
648 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
649 // CHECK9-NEXT:    store i32 0, i32* [[J]], align 4
650 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
651 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
652 // CHECK9-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
653 // CHECK9:       land.lhs.true:
654 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
655 // CHECK9-NEXT:    [[CMP12:%.*]] = icmp slt i32 0, [[TMP8]]
656 // CHECK9-NEXT:    br i1 [[CMP12]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
657 // CHECK9:       omp.precond.then:
658 // CHECK9-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
659 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_6]], align 8
660 // CHECK9-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_UB]], align 8
661 // CHECK9-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
662 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
663 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
664 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
665 // CHECK9-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
666 // CHECK9-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
667 // CHECK9-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_6]], align 8
668 // CHECK9-NEXT:    [[CMP15:%.*]] = icmp sgt i64 [[TMP12]], [[TMP13]]
669 // CHECK9-NEXT:    br i1 [[CMP15]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
670 // CHECK9:       cond.true:
671 // CHECK9-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_6]], align 8
672 // CHECK9-NEXT:    br label [[COND_END:%.*]]
673 // CHECK9:       cond.false:
674 // CHECK9-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
675 // CHECK9-NEXT:    br label [[COND_END]]
676 // CHECK9:       cond.end:
677 // CHECK9-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
678 // CHECK9-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
679 // CHECK9-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
680 // CHECK9-NEXT:    store i64 [[TMP16]], i64* [[DOTOMP_IV]], align 8
681 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
682 // CHECK9:       omp.inner.for.cond:
683 // CHECK9-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
684 // CHECK9-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
685 // CHECK9-NEXT:    [[CMP16:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]]
686 // CHECK9-NEXT:    br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
687 // CHECK9:       omp.inner.for.body:
688 // CHECK9-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
689 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
690 // CHECK9-NEXT:    [[SUB17:%.*]] = sub nsw i32 [[TMP20]], 0
691 // CHECK9-NEXT:    [[DIV18:%.*]] = sdiv i32 [[SUB17]], 1
692 // CHECK9-NEXT:    [[MUL19:%.*]] = mul nsw i32 1, [[DIV18]]
693 // CHECK9-NEXT:    [[CONV20:%.*]] = sext i32 [[MUL19]] to i64
694 // CHECK9-NEXT:    [[DIV21:%.*]] = sdiv i64 [[TMP19]], [[CONV20]]
695 // CHECK9-NEXT:    [[MUL22:%.*]] = mul nsw i64 [[DIV21]], 1
696 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL22]]
697 // CHECK9-NEXT:    [[CONV23:%.*]] = trunc i64 [[ADD]] to i32
698 // CHECK9-NEXT:    store i32 [[CONV23]], i32* [[I13]], align 4
699 // CHECK9-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
700 // CHECK9-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
701 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
702 // CHECK9-NEXT:    [[SUB24:%.*]] = sub nsw i32 [[TMP23]], 0
703 // CHECK9-NEXT:    [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1
704 // CHECK9-NEXT:    [[MUL26:%.*]] = mul nsw i32 1, [[DIV25]]
705 // CHECK9-NEXT:    [[CONV27:%.*]] = sext i32 [[MUL26]] to i64
706 // CHECK9-NEXT:    [[DIV28:%.*]] = sdiv i64 [[TMP22]], [[CONV27]]
707 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
708 // CHECK9-NEXT:    [[SUB29:%.*]] = sub nsw i32 [[TMP24]], 0
709 // CHECK9-NEXT:    [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1
710 // CHECK9-NEXT:    [[MUL31:%.*]] = mul nsw i32 1, [[DIV30]]
711 // CHECK9-NEXT:    [[CONV32:%.*]] = sext i32 [[MUL31]] to i64
712 // CHECK9-NEXT:    [[MUL33:%.*]] = mul nsw i64 [[DIV28]], [[CONV32]]
713 // CHECK9-NEXT:    [[SUB34:%.*]] = sub nsw i64 [[TMP21]], [[MUL33]]
714 // CHECK9-NEXT:    [[MUL35:%.*]] = mul nsw i64 [[SUB34]], 1
715 // CHECK9-NEXT:    [[ADD36:%.*]] = add nsw i64 0, [[MUL35]]
716 // CHECK9-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
717 // CHECK9-NEXT:    store i32 [[CONV37]], i32* [[J14]], align 4
718 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I13]], align 4
719 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP25]] to i64
720 // CHECK9-NEXT:    [[TMP26:%.*]] = mul nsw i64 [[IDXPROM]], [[TMP1]]
721 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[TMP26]]
722 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[J14]], align 4
723 // CHECK9-NEXT:    [[IDXPROM38:%.*]] = sext i32 [[TMP27]] to i64
724 // CHECK9-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds i32, i32* [[ARRAYIDX]], i64 [[IDXPROM38]]
725 // CHECK9-NEXT:    store i32 0, i32* [[ARRAYIDX39]], align 4
726 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
727 // CHECK9:       omp.body.continue:
728 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
729 // CHECK9:       omp.inner.for.inc:
730 // CHECK9-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
731 // CHECK9-NEXT:    [[ADD40:%.*]] = add nsw i64 [[TMP28]], 1
732 // CHECK9-NEXT:    store i64 [[ADD40]], i64* [[DOTOMP_IV]], align 8
733 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
734 // CHECK9:       omp.inner.for.end:
735 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
736 // CHECK9:       omp.loop.exit:
737 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
738 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
739 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
740 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
741 // CHECK9:       omp.precond.end:
742 // CHECK9-NEXT:    ret void
743 //
744 //
745 // CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIiLi10ELi2EEiT_
746 // CHECK9-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR5:[0-9]+]] comdat {
747 // CHECK9-NEXT:  entry:
748 // CHECK9-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
749 // CHECK9-NEXT:    [[A:%.*]] = alloca [10 x [2 x i32]], align 4
750 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
751 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
752 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
753 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
754 // CHECK9-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
755 // CHECK9-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
756 // CHECK9-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
757 // CHECK9-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x [2 x i32]]**
758 // CHECK9-NEXT:    store [10 x [2 x i32]]* [[A]], [10 x [2 x i32]]** [[TMP1]], align 8
759 // CHECK9-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
760 // CHECK9-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x [2 x i32]]**
761 // CHECK9-NEXT:    store [10 x [2 x i32]]* [[A]], [10 x [2 x i32]]** [[TMP3]], align 8
762 // CHECK9-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
763 // CHECK9-NEXT:    store i8* null, i8** [[TMP4]], align 8
764 // CHECK9-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
765 // CHECK9-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
766 // CHECK9-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
767 // CHECK9-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
768 // CHECK9-NEXT:    store i32 1, i32* [[TMP7]], align 4
769 // CHECK9-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
770 // CHECK9-NEXT:    store i32 1, i32* [[TMP8]], align 4
771 // CHECK9-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
772 // CHECK9-NEXT:    store i8** [[TMP5]], i8*** [[TMP9]], align 8
773 // CHECK9-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
774 // CHECK9-NEXT:    store i8** [[TMP6]], i8*** [[TMP10]], align 8
775 // CHECK9-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
776 // CHECK9-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64** [[TMP11]], align 8
777 // CHECK9-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
778 // CHECK9-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i64** [[TMP12]], align 8
779 // CHECK9-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
780 // CHECK9-NEXT:    store i8** null, i8*** [[TMP13]], align 8
781 // CHECK9-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
782 // CHECK9-NEXT:    store i8** null, i8*** [[TMP14]], align 8
783 // CHECK9-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
784 // CHECK9-NEXT:    store i64 20, i64* [[TMP15]], align 8
785 // CHECK9-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
786 // CHECK9-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
787 // CHECK9-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
788 // CHECK9:       omp_offload.failed:
789 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67([10 x [2 x i32]]* [[A]]) #[[ATTR3]]
790 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT]]
791 // CHECK9:       omp_offload.cont:
792 // CHECK9-NEXT:    ret i32 0
793 //
794 //
795 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67
796 // CHECK9-SAME: ([10 x [2 x i32]]* noundef nonnull align 4 dereferenceable(80) [[A:%.*]]) #[[ATTR2]] {
797 // CHECK9-NEXT:  entry:
798 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca [10 x [2 x i32]]*, align 8
799 // CHECK9-NEXT:    store [10 x [2 x i32]]* [[A]], [10 x [2 x i32]]** [[A_ADDR]], align 8
800 // CHECK9-NEXT:    [[TMP0:%.*]] = load [10 x [2 x i32]]*, [10 x [2 x i32]]** [[A_ADDR]], align 8
801 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x [2 x i32]]*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), [10 x [2 x i32]]* [[TMP0]])
802 // CHECK9-NEXT:    ret void
803 //
804 //
805 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..1
806 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x [2 x i32]]* noundef nonnull align 4 dereferenceable(80) [[A:%.*]]) #[[ATTR2]] {
807 // CHECK9-NEXT:  entry:
808 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
809 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
810 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca [10 x [2 x i32]]*, align 8
811 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
812 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
813 // CHECK9-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
814 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
815 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
816 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
817 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
818 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
819 // CHECK9-NEXT:    [[J:%.*]] = alloca i32, align 4
820 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
821 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
822 // CHECK9-NEXT:    store [10 x [2 x i32]]* [[A]], [10 x [2 x i32]]** [[A_ADDR]], align 8
823 // CHECK9-NEXT:    [[TMP0:%.*]] = load [10 x [2 x i32]]*, [10 x [2 x i32]]** [[A_ADDR]], align 8
824 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
825 // CHECK9-NEXT:    store i32 19, i32* [[DOTOMP_UB]], align 4
826 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
827 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
828 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
829 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
830 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
831 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
832 // CHECK9-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 19
833 // CHECK9-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
834 // CHECK9:       cond.true:
835 // CHECK9-NEXT:    br label [[COND_END:%.*]]
836 // CHECK9:       cond.false:
837 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
838 // CHECK9-NEXT:    br label [[COND_END]]
839 // CHECK9:       cond.end:
840 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ 19, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
841 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
842 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
843 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
844 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
845 // CHECK9:       omp.inner.for.cond:
846 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
847 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
848 // CHECK9-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
849 // CHECK9-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
850 // CHECK9:       omp.inner.for.body:
851 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
852 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 2
853 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
854 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
855 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
856 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
857 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
858 // CHECK9-NEXT:    [[DIV3:%.*]] = sdiv i32 [[TMP10]], 2
859 // CHECK9-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 2
860 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL4]]
861 // CHECK9-NEXT:    [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1
862 // CHECK9-NEXT:    [[ADD6:%.*]] = add nsw i32 0, [[MUL5]]
863 // CHECK9-NEXT:    store i32 [[ADD6]], i32* [[J]], align 4
864 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
865 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
866 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], [10 x [2 x i32]]* [[TMP0]], i64 0, i64 [[IDXPROM]]
867 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
868 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP12]] to i64
869 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[ARRAYIDX]], i64 0, i64 [[IDXPROM7]]
870 // CHECK9-NEXT:    store i32 0, i32* [[ARRAYIDX8]], align 4
871 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
872 // CHECK9:       omp.body.continue:
873 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
874 // CHECK9:       omp.inner.for.inc:
875 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
876 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
877 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4
878 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]]
879 // CHECK9:       omp.inner.for.end:
880 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
881 // CHECK9:       omp.loop.exit:
882 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
883 // CHECK9-NEXT:    ret void
884 //
885 //
886 // CHECK9-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
887 // CHECK9-SAME: () #[[ATTR6:[0-9]+]] {
888 // CHECK9-NEXT:  entry:
889 // CHECK9-NEXT:    call void @__tgt_register_requires(i64 1)
890 // CHECK9-NEXT:    ret void
891 //
892 //
893 // CHECK11-LABEL: define {{[^@]+}}@main
894 // CHECK11-SAME: (i32 noundef [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
895 // CHECK11-NEXT:  entry:
896 // CHECK11-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
897 // CHECK11-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
898 // CHECK11-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 4
899 // CHECK11-NEXT:    [[N:%.*]] = alloca i32, align 4
900 // CHECK11-NEXT:    [[M:%.*]] = alloca i32, align 4
901 // CHECK11-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
902 // CHECK11-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
903 // CHECK11-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
904 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
905 // CHECK11-NEXT:    [[M_CASTED:%.*]] = alloca i32, align 4
906 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
907 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
908 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
909 // CHECK11-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4
910 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
911 // CHECK11-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
912 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
913 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
914 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
915 // CHECK11-NEXT:    store i32 0, i32* [[RETVAL]], align 4
916 // CHECK11-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
917 // CHECK11-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4
918 // CHECK11-NEXT:    store i32 100, i32* [[N]], align 4
919 // CHECK11-NEXT:    store i32 2, i32* [[M]], align 4
920 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
921 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[M]], align 4
922 // CHECK11-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
923 // CHECK11-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
924 // CHECK11-NEXT:    [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]]
925 // CHECK11-NEXT:    [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4
926 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
927 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR1]], align 4
928 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N]], align 4
929 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[N_CASTED]], align 4
930 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_CASTED]], align 4
931 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[M]], align 4
932 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[M_CASTED]], align 4
933 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[M_CASTED]], align 4
934 // CHECK11-NEXT:    [[TMP8:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]]
935 // CHECK11-NEXT:    [[TMP9:%.*]] = mul nuw i32 [[TMP8]], 4
936 // CHECK11-NEXT:    [[TMP10:%.*]] = sext i32 [[TMP9]] to i64
937 // CHECK11-NEXT:    [[TMP11:%.*]] = bitcast [5 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
938 // CHECK11-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP11]], i8* align 4 bitcast ([5 x i64]* @.offload_sizes to i8*), i32 40, i1 false)
939 // CHECK11-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
940 // CHECK11-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
941 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[TMP13]], align 4
942 // CHECK11-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
943 // CHECK11-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
944 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[TMP15]], align 4
945 // CHECK11-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
946 // CHECK11-NEXT:    store i8* null, i8** [[TMP16]], align 4
947 // CHECK11-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
948 // CHECK11-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
949 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[TMP18]], align 4
950 // CHECK11-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
951 // CHECK11-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
952 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[TMP20]], align 4
953 // CHECK11-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
954 // CHECK11-NEXT:    store i8* null, i8** [[TMP21]], align 4
955 // CHECK11-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
956 // CHECK11-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
957 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[TMP23]], align 4
958 // CHECK11-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
959 // CHECK11-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
960 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[TMP25]], align 4
961 // CHECK11-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
962 // CHECK11-NEXT:    store i8* null, i8** [[TMP26]], align 4
963 // CHECK11-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
964 // CHECK11-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
965 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP28]], align 4
966 // CHECK11-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
967 // CHECK11-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
968 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP30]], align 4
969 // CHECK11-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
970 // CHECK11-NEXT:    store i8* null, i8** [[TMP31]], align 4
971 // CHECK11-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
972 // CHECK11-NEXT:    [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32**
973 // CHECK11-NEXT:    store i32* [[VLA]], i32** [[TMP33]], align 4
974 // CHECK11-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
975 // CHECK11-NEXT:    [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32**
976 // CHECK11-NEXT:    store i32* [[VLA]], i32** [[TMP35]], align 4
977 // CHECK11-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
978 // CHECK11-NEXT:    store i64 [[TMP10]], i64* [[TMP36]], align 4
979 // CHECK11-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
980 // CHECK11-NEXT:    store i8* null, i8** [[TMP37]], align 4
981 // CHECK11-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
982 // CHECK11-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
983 // CHECK11-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
984 // CHECK11-NEXT:    [[TMP41:%.*]] = load i32, i32* [[N]], align 4
985 // CHECK11-NEXT:    store i32 [[TMP41]], i32* [[DOTCAPTURE_EXPR_]], align 4
986 // CHECK11-NEXT:    [[TMP42:%.*]] = load i32, i32* [[M]], align 4
987 // CHECK11-NEXT:    store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_2]], align 4
988 // CHECK11-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
989 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP43]], 0
990 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
991 // CHECK11-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
992 // CHECK11-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
993 // CHECK11-NEXT:    [[SUB4:%.*]] = sub nsw i32 [[TMP44]], 0
994 // CHECK11-NEXT:    [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
995 // CHECK11-NEXT:    [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
996 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
997 // CHECK11-NEXT:    [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
998 // CHECK11-NEXT:    store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8
999 // CHECK11-NEXT:    [[TMP45:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8
1000 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP45]], 1
1001 // CHECK11-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
1002 // CHECK11-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
1003 // CHECK11-NEXT:    store i32 1, i32* [[TMP46]], align 4
1004 // CHECK11-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
1005 // CHECK11-NEXT:    store i32 5, i32* [[TMP47]], align 4
1006 // CHECK11-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
1007 // CHECK11-NEXT:    store i8** [[TMP38]], i8*** [[TMP48]], align 4
1008 // CHECK11-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
1009 // CHECK11-NEXT:    store i8** [[TMP39]], i8*** [[TMP49]], align 4
1010 // CHECK11-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
1011 // CHECK11-NEXT:    store i64* [[TMP40]], i64** [[TMP50]], align 4
1012 // CHECK11-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
1013 // CHECK11-NEXT:    store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP51]], align 4
1014 // CHECK11-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
1015 // CHECK11-NEXT:    store i8** null, i8*** [[TMP52]], align 4
1016 // CHECK11-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
1017 // CHECK11-NEXT:    store i8** null, i8*** [[TMP53]], align 4
1018 // CHECK11-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
1019 // CHECK11-NEXT:    store i64 [[ADD]], i64* [[TMP54]], align 8
1020 // CHECK11-NEXT:    [[TMP55:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
1021 // CHECK11-NEXT:    [[TMP56:%.*]] = icmp ne i32 [[TMP55]], 0
1022 // CHECK11-NEXT:    br i1 [[TMP56]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1023 // CHECK11:       omp_offload.failed:
1024 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
1025 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1026 // CHECK11:       omp_offload.cont:
1027 // CHECK11-NEXT:    [[TMP57:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
1028 // CHECK11-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP57]])
1029 // CHECK11-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
1030 // CHECK11-NEXT:    [[TMP58:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
1031 // CHECK11-NEXT:    call void @llvm.stackrestore(i8* [[TMP58]])
1032 // CHECK11-NEXT:    [[TMP59:%.*]] = load i32, i32* [[RETVAL]], align 4
1033 // CHECK11-NEXT:    ret i32 [[TMP59]]
1034 //
1035 //
1036 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80
1037 // CHECK11-SAME: (i32 noundef [[N:%.*]], i32 noundef [[M:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
1038 // CHECK11-NEXT:  entry:
1039 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1040 // CHECK11-NEXT:    [[M_ADDR:%.*]] = alloca i32, align 4
1041 // CHECK11-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
1042 // CHECK11-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
1043 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
1044 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
1045 // CHECK11-NEXT:    [[M_CASTED:%.*]] = alloca i32, align 4
1046 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1047 // CHECK11-NEXT:    store i32 [[M]], i32* [[M_ADDR]], align 4
1048 // CHECK11-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
1049 // CHECK11-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
1050 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
1051 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
1052 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
1053 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 4
1054 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
1055 // CHECK11-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
1056 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
1057 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[M_ADDR]], align 4
1058 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[M_CASTED]], align 4
1059 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[M_CASTED]], align 4
1060 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP0]], i32 [[TMP1]], i32* [[TMP2]])
1061 // CHECK11-NEXT:    ret void
1062 //
1063 //
1064 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined.
1065 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[M:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
1066 // CHECK11-NEXT:  entry:
1067 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1068 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1069 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1070 // CHECK11-NEXT:    [[M_ADDR:%.*]] = alloca i32, align 4
1071 // CHECK11-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
1072 // CHECK11-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
1073 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
1074 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
1075 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1076 // CHECK11-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
1077 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1078 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
1079 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_5:%.*]] = alloca i64, align 8
1080 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
1081 // CHECK11-NEXT:    [[J:%.*]] = alloca i32, align 4
1082 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
1083 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
1084 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
1085 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1086 // CHECK11-NEXT:    [[I11:%.*]] = alloca i32, align 4
1087 // CHECK11-NEXT:    [[J12:%.*]] = alloca i32, align 4
1088 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1089 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1090 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1091 // CHECK11-NEXT:    store i32 [[M]], i32* [[M_ADDR]], align 4
1092 // CHECK11-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
1093 // CHECK11-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
1094 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
1095 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
1096 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
1097 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 4
1098 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
1099 // CHECK11-NEXT:    store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
1100 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[M_ADDR]], align 4
1101 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_4]], align 4
1102 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1103 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1104 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1105 // CHECK11-NEXT:    [[CONV:%.*]] = sext i32 [[DIV]] to i64
1106 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
1107 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP6]], 0
1108 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
1109 // CHECK11-NEXT:    [[CONV8:%.*]] = sext i32 [[DIV7]] to i64
1110 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV8]]
1111 // CHECK11-NEXT:    [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1
1112 // CHECK11-NEXT:    store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_5]], align 8
1113 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
1114 // CHECK11-NEXT:    store i32 0, i32* [[J]], align 4
1115 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1116 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
1117 // CHECK11-NEXT:    br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
1118 // CHECK11:       land.lhs.true:
1119 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
1120 // CHECK11-NEXT:    [[CMP10:%.*]] = icmp slt i32 0, [[TMP8]]
1121 // CHECK11-NEXT:    br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
1122 // CHECK11:       omp.precond.then:
1123 // CHECK11-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
1124 // CHECK11-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_5]], align 8
1125 // CHECK11-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_UB]], align 8
1126 // CHECK11-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
1127 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1128 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1129 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1130 // CHECK11-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
1131 // CHECK11-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
1132 // CHECK11-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_5]], align 8
1133 // CHECK11-NEXT:    [[CMP13:%.*]] = icmp sgt i64 [[TMP12]], [[TMP13]]
1134 // CHECK11-NEXT:    br i1 [[CMP13]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1135 // CHECK11:       cond.true:
1136 // CHECK11-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_5]], align 8
1137 // CHECK11-NEXT:    br label [[COND_END:%.*]]
1138 // CHECK11:       cond.false:
1139 // CHECK11-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
1140 // CHECK11-NEXT:    br label [[COND_END]]
1141 // CHECK11:       cond.end:
1142 // CHECK11-NEXT:    [[COND:%.*]] = phi i64 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1143 // CHECK11-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
1144 // CHECK11-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
1145 // CHECK11-NEXT:    store i64 [[TMP16]], i64* [[DOTOMP_IV]], align 8
1146 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1147 // CHECK11:       omp.inner.for.cond:
1148 // CHECK11-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
1149 // CHECK11-NEXT:    [[TMP18:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
1150 // CHECK11-NEXT:    [[CMP14:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]]
1151 // CHECK11-NEXT:    br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1152 // CHECK11:       omp.inner.for.body:
1153 // CHECK11-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
1154 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
1155 // CHECK11-NEXT:    [[SUB15:%.*]] = sub nsw i32 [[TMP20]], 0
1156 // CHECK11-NEXT:    [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
1157 // CHECK11-NEXT:    [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]]
1158 // CHECK11-NEXT:    [[CONV18:%.*]] = sext i32 [[MUL17]] to i64
1159 // CHECK11-NEXT:    [[DIV19:%.*]] = sdiv i64 [[TMP19]], [[CONV18]]
1160 // CHECK11-NEXT:    [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1
1161 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i64 0, [[MUL20]]
1162 // CHECK11-NEXT:    [[CONV21:%.*]] = trunc i64 [[ADD]] to i32
1163 // CHECK11-NEXT:    store i32 [[CONV21]], i32* [[I11]], align 4
1164 // CHECK11-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
1165 // CHECK11-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
1166 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
1167 // CHECK11-NEXT:    [[SUB22:%.*]] = sub nsw i32 [[TMP23]], 0
1168 // CHECK11-NEXT:    [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1
1169 // CHECK11-NEXT:    [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]]
1170 // CHECK11-NEXT:    [[CONV25:%.*]] = sext i32 [[MUL24]] to i64
1171 // CHECK11-NEXT:    [[DIV26:%.*]] = sdiv i64 [[TMP22]], [[CONV25]]
1172 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
1173 // CHECK11-NEXT:    [[SUB27:%.*]] = sub nsw i32 [[TMP24]], 0
1174 // CHECK11-NEXT:    [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1
1175 // CHECK11-NEXT:    [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]]
1176 // CHECK11-NEXT:    [[CONV30:%.*]] = sext i32 [[MUL29]] to i64
1177 // CHECK11-NEXT:    [[MUL31:%.*]] = mul nsw i64 [[DIV26]], [[CONV30]]
1178 // CHECK11-NEXT:    [[SUB32:%.*]] = sub nsw i64 [[TMP21]], [[MUL31]]
1179 // CHECK11-NEXT:    [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1
1180 // CHECK11-NEXT:    [[ADD34:%.*]] = add nsw i64 0, [[MUL33]]
1181 // CHECK11-NEXT:    [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32
1182 // CHECK11-NEXT:    store i32 [[CONV35]], i32* [[J12]], align 4
1183 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I11]], align 4
1184 // CHECK11-NEXT:    [[TMP26:%.*]] = mul nsw i32 [[TMP25]], [[TMP1]]
1185 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 [[TMP26]]
1186 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[J12]], align 4
1187 // CHECK11-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds i32, i32* [[ARRAYIDX]], i32 [[TMP27]]
1188 // CHECK11-NEXT:    store i32 0, i32* [[ARRAYIDX36]], align 4
1189 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1190 // CHECK11:       omp.body.continue:
1191 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1192 // CHECK11:       omp.inner.for.inc:
1193 // CHECK11-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
1194 // CHECK11-NEXT:    [[ADD37:%.*]] = add nsw i64 [[TMP28]], 1
1195 // CHECK11-NEXT:    store i64 [[ADD37]], i64* [[DOTOMP_IV]], align 8
1196 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]]
1197 // CHECK11:       omp.inner.for.end:
1198 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1199 // CHECK11:       omp.loop.exit:
1200 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1201 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
1202 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
1203 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
1204 // CHECK11:       omp.precond.end:
1205 // CHECK11-NEXT:    ret void
1206 //
1207 //
1208 // CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIiLi10ELi2EEiT_
1209 // CHECK11-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR5:[0-9]+]] comdat {
1210 // CHECK11-NEXT:  entry:
1211 // CHECK11-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
1212 // CHECK11-NEXT:    [[A:%.*]] = alloca [10 x [2 x i32]], align 4
1213 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
1214 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
1215 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
1216 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1217 // CHECK11-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
1218 // CHECK11-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
1219 // CHECK11-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1220 // CHECK11-NEXT:    [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x [2 x i32]]**
1221 // CHECK11-NEXT:    store [10 x [2 x i32]]* [[A]], [10 x [2 x i32]]** [[TMP1]], align 4
1222 // CHECK11-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1223 // CHECK11-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x [2 x i32]]**
1224 // CHECK11-NEXT:    store [10 x [2 x i32]]* [[A]], [10 x [2 x i32]]** [[TMP3]], align 4
1225 // CHECK11-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
1226 // CHECK11-NEXT:    store i8* null, i8** [[TMP4]], align 4
1227 // CHECK11-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1228 // CHECK11-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1229 // CHECK11-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
1230 // CHECK11-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
1231 // CHECK11-NEXT:    store i32 1, i32* [[TMP7]], align 4
1232 // CHECK11-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
1233 // CHECK11-NEXT:    store i32 1, i32* [[TMP8]], align 4
1234 // CHECK11-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
1235 // CHECK11-NEXT:    store i8** [[TMP5]], i8*** [[TMP9]], align 4
1236 // CHECK11-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
1237 // CHECK11-NEXT:    store i8** [[TMP6]], i8*** [[TMP10]], align 4
1238 // CHECK11-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
1239 // CHECK11-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64** [[TMP11]], align 4
1240 // CHECK11-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
1241 // CHECK11-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i64** [[TMP12]], align 4
1242 // CHECK11-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
1243 // CHECK11-NEXT:    store i8** null, i8*** [[TMP13]], align 4
1244 // CHECK11-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
1245 // CHECK11-NEXT:    store i8** null, i8*** [[TMP14]], align 4
1246 // CHECK11-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
1247 // CHECK11-NEXT:    store i64 20, i64* [[TMP15]], align 8
1248 // CHECK11-NEXT:    [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
1249 // CHECK11-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
1250 // CHECK11-NEXT:    br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1251 // CHECK11:       omp_offload.failed:
1252 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67([10 x [2 x i32]]* [[A]]) #[[ATTR3]]
1253 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1254 // CHECK11:       omp_offload.cont:
1255 // CHECK11-NEXT:    ret i32 0
1256 //
1257 //
1258 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67
1259 // CHECK11-SAME: ([10 x [2 x i32]]* noundef nonnull align 4 dereferenceable(80) [[A:%.*]]) #[[ATTR2]] {
1260 // CHECK11-NEXT:  entry:
1261 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca [10 x [2 x i32]]*, align 4
1262 // CHECK11-NEXT:    store [10 x [2 x i32]]* [[A]], [10 x [2 x i32]]** [[A_ADDR]], align 4
1263 // CHECK11-NEXT:    [[TMP0:%.*]] = load [10 x [2 x i32]]*, [10 x [2 x i32]]** [[A_ADDR]], align 4
1264 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x [2 x i32]]*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), [10 x [2 x i32]]* [[TMP0]])
1265 // CHECK11-NEXT:    ret void
1266 //
1267 //
1268 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1
1269 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x [2 x i32]]* noundef nonnull align 4 dereferenceable(80) [[A:%.*]]) #[[ATTR2]] {
1270 // CHECK11-NEXT:  entry:
1271 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
1272 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
1273 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca [10 x [2 x i32]]*, align 4
1274 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1275 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1276 // CHECK11-NEXT:    [[_TMP1:%.*]] = alloca i32, align 4
1277 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1278 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1279 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1280 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1281 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
1282 // CHECK11-NEXT:    [[J:%.*]] = alloca i32, align 4
1283 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
1284 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
1285 // CHECK11-NEXT:    store [10 x [2 x i32]]* [[A]], [10 x [2 x i32]]** [[A_ADDR]], align 4
1286 // CHECK11-NEXT:    [[TMP0:%.*]] = load [10 x [2 x i32]]*, [10 x [2 x i32]]** [[A_ADDR]], align 4
1287 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1288 // CHECK11-NEXT:    store i32 19, i32* [[DOTOMP_UB]], align 4
1289 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1290 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1291 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
1292 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1293 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1294 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1295 // CHECK11-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 19
1296 // CHECK11-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1297 // CHECK11:       cond.true:
1298 // CHECK11-NEXT:    br label [[COND_END:%.*]]
1299 // CHECK11:       cond.false:
1300 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1301 // CHECK11-NEXT:    br label [[COND_END]]
1302 // CHECK11:       cond.end:
1303 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ 19, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1304 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1305 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1306 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
1307 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1308 // CHECK11:       omp.inner.for.cond:
1309 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1310 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1311 // CHECK11-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1312 // CHECK11-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1313 // CHECK11:       omp.inner.for.body:
1314 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1315 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP8]], 2
1316 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
1317 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1318 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
1319 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1320 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1321 // CHECK11-NEXT:    [[DIV3:%.*]] = sdiv i32 [[TMP10]], 2
1322 // CHECK11-NEXT:    [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 2
1323 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL4]]
1324 // CHECK11-NEXT:    [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1
1325 // CHECK11-NEXT:    [[ADD6:%.*]] = add nsw i32 0, [[MUL5]]
1326 // CHECK11-NEXT:    store i32 [[ADD6]], i32* [[J]], align 4
1327 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
1328 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], [10 x [2 x i32]]* [[TMP0]], i32 0, i32 [[TMP11]]
1329 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[J]], align 4
1330 // CHECK11-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[ARRAYIDX]], i32 0, i32 [[TMP12]]
1331 // CHECK11-NEXT:    store i32 0, i32* [[ARRAYIDX7]], align 4
1332 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1333 // CHECK11:       omp.body.continue:
1334 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1335 // CHECK11:       omp.inner.for.inc:
1336 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1337 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1
1338 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
1339 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]]
1340 // CHECK11:       omp.inner.for.end:
1341 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1342 // CHECK11:       omp.loop.exit:
1343 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1344 // CHECK11-NEXT:    ret void
1345 //
1346 //
1347 // CHECK11-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1348 // CHECK11-SAME: () #[[ATTR6:[0-9]+]] {
1349 // CHECK11-NEXT:  entry:
1350 // CHECK11-NEXT:    call void @__tgt_register_requires(i64 1)
1351 // CHECK11-NEXT:    ret void
1352 //
1353