1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // Test host codegen.
3 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck  %s --check-prefix=CHECK1
4 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
5 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK2
6 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck  %s --check-prefix=CHECK3
7 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
8 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK4
9 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck  %s --check-prefix=CHECK5
10 // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
11 // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK6
12 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck  %s --check-prefix=CHECK7
13 // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
14 // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK8
15 
16 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck  %s --check-prefix=CHECK9
17 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
18 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK10
19 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck  %s --check-prefix=CHECK11
20 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
21 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK12
22 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck  %s --check-prefix=CHECK13
23 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
24 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK14
25 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck  %s --check-prefix=CHECK15
26 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
27 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK16
28 
29 // Test target codegen - host bc file has to be created first.
30 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
31 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck  %s --check-prefix=CHECK17
32 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
33 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK18
34 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
35 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck  %s --check-prefix=CHECK19
36 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
37 // RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK20
38 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
39 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck  %s --check-prefix=CHECK21
40 // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
41 // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK22
42 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
43 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck  %s --check-prefix=CHECK23
44 // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
45 // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK24
46 
47 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
48 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck  %s --check-prefix=CHECK25
49 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
50 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK26
51 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
52 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck  %s --check-prefix=CHECK27
53 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
54 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK28
55 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
56 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck  %s --check-prefix=CHECK29
57 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
58 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK30
59 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
60 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck  %s --check-prefix=CHECK31
61 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
62 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck  %s --check-prefix=CHECK32
63 // expected-no-diagnostics
64 
65 #ifndef HEADER
66 #define HEADER
67 
68 
69 
70 
// We have 8 target regions, but only 7 of them will actually generate
// offloading code; only 6 will have mapped arguments, and only 4 have
// all-constant map sizes.
74 
75 
76 
// Check that the target registration call is emitted as a global Ctor.
78 
79 
template<typename tx, typename ty> // Simple aggregate template; captured by value in a target region below.
struct TT{
  tx X; // first member (long long in foo's instantiation)
  ty Y; // second member (char in foo's instantiation)
};
85 
86 int global;
87 
int foo(int n) { // Exercises several 'target teams distribute simd' regions with varying clauses and captures. NOTE: do not add/remove lines — generated symbol names encode these source line numbers.
  int a = 0;
  short aa = 0;
  float b[10];
  float bn[n]; // VLA: its size must be captured and mapped for the target region below
  double c[5][10];
  double cn[5][n]; // 2-D VLA: contributes extra size captures to the mapping
  TT<long long, char> d; // aggregate capture, mapped by its members d.X / d.Y

  #pragma omp target teams distribute simd num_teams(a) thread_limit(a) firstprivate(aa) simdlen(16) nowait // nowait: region is emitted via a target task (__kmpc_omp_target_task_alloc)
  for (int i = 0; i < 10; ++i) {
  }

#ifdef OMP5
  #pragma omp target teams distribute simd if(target: 0) safelen(32) linear(a) if(simd: 1) nontemporal(a) // OpenMP 5.0 variant: adds if(simd:) and nontemporal clauses
#else
  #pragma omp target teams distribute simd if(target: 0) safelen(32) linear(a) // if(target: 0): region always runs on the host, no offload call emitted
#endif // OMP5
  for (a = 0; a < 10; ++a) {
    a += 1;
  }


  #pragma omp target teams distribute simd if(target: 1) // if(target: 1): unconditional offload with one mapped argument (aa)
  for (int i = 0; i < 10; ++i) {
    aa += 1;
  }



  #pragma omp target teams distribute simd if(target: n>10) // runtime condition selects between offload and host fallback (omp_if.then / omp_if.else)
  for (int i = 0; i < 10; ++i) {
    a += 1;
    aa += 1;
  }

  // We capture 3 VLA sizes in this target region





  // The names below are not necessarily consistent with the names used for the
  // addresses above as some are repeated.










  #pragma omp target teams distribute simd if(target: n>20) aligned(b) // largest region: 9 mapped arguments including VLAs, so map sizes are not all constant
  for (int i = 0; i < 10; ++i) {
    a += 1;
    b[2] += 1.0;
    bn[3] += 1.0;
    c[1][2] += 1.0;
    cn[1][3] += 1.0;
    d.X += 1;
    d.Y += 1;
  }

  return a;
}
155 
156 // Check that the offloading functions are emitted and that the arguments are
157 // correct and loaded correctly for the target regions in foo().
158 
159 
160 
161 
162 // Create stack storage and store argument in there.
163 
164 // Create stack storage and store argument in there.
165 
166 // Create stack storage and store argument in there.
167 
168 // Create local storage for each capture.
169 
170 
171 
172 // To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.
173 
template<typename tx> // Instantiated as ftemplate<int> from bar(); checks codegen for a templated target region.
tx ftemplate(int n) {
  tx a = 0;
  short aa = 0;
  tx b[10];

  #pragma omp target teams distribute simd if(target: n>40) // conditional offload; a, aa, and b are mapped
  for (int i = 0; i < 10; ++i) {
    a += 1;
    aa += 1;
    b[2] += 1;
  }

  return a;
}
189 
static
int fstatic(int n) { // internal-linkage callee of bar(); loop bound 'n' gives a non-constant trip count
  int a = 0;
  short aa = 0;
  char aaa = 0;
  int b[10];

  #pragma omp target teams distribute simd if(target: n>50) // conditional offload with 4 mapped captures (a, aa, aaa, b)
  for (int i = a; i < n; ++i) {
    a += 1;
    aa += 1;
    aaa += 1;
    b[2] += 1;
  }

  return a;
}
207 
struct S1 {
  double a; // member accessed via 'this' inside the target region (tests implicit this capture)

  int r1(int n){ // member-function target region: captures this, b, and the VLA c with its 2 sizes
    int b = n+1;
    short int c[2][n]; // VLA member-local array; 2 sizes are captured for mapping

    #pragma omp target teams distribute simd if(n>60) // bare if() applies to the target construct here
    for (int i = 0; i < 10; ++i) {
      this->a = (double)b + 1.5;
      c[1][1] = ++a;
    }

    return c[1][1] + (int)b;
  }
};
224 
int bar(int n){ // Driver: forces emission of all callee target regions (member, static, and template).
  int a = 0;

  a += foo(n);

  S1 S;
  a += S.r1(n); // member-function region

  a += fstatic(n); // internal-linkage region

  a += ftemplate<int>(n); // template instantiation region

  return a;
}
239 
240 
241 
242 // We capture 2 VLA sizes in this target region
243 
244 
245 // The names below are not necessarily consistent with the names used for the
246 // addresses above as some are repeated.
247 
248 
249 
250 
251 
252 
253 
254 
255 
256 
257 
258 
259 
260 
261 
262 
263 
264 
265 
266 
267 
268 
269 // Check that the offloading functions are emitted and that the arguments are
270 // correct and loaded correctly for the target regions of the callees of bar().
271 
272 // Create local storage for each capture.
273 // Store captures in the context.
274 
275 
276 // To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.
277 
278 
279 // Create local storage for each capture.
280 // Store captures in the context.
281 
282 
283 
284 
285 // To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.
286 
287 // Create local storage for each capture.
288 // Store captures in the context.
289 
290 
291 
292 // To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.
293 
294 
295 #endif
296 // CHECK1-LABEL: define {{[^@]+}}@_Z3fooi
297 // CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
298 // CHECK1-NEXT:  entry:
299 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
300 // CHECK1-NEXT:    [[A:%.*]] = alloca i32, align 4
301 // CHECK1-NEXT:    [[AA:%.*]] = alloca i16, align 2
302 // CHECK1-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
303 // CHECK1-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
304 // CHECK1-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
305 // CHECK1-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
306 // CHECK1-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
307 // CHECK1-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
308 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
309 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
310 // CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
311 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
312 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED4:%.*]] = alloca i64, align 8
313 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
314 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
315 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
316 // CHECK1-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
317 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
318 // CHECK1-NEXT:    [[AA_CASTED7:%.*]] = alloca i64, align 8
319 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS9:%.*]] = alloca [1 x i8*], align 8
320 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS10:%.*]] = alloca [1 x i8*], align 8
321 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS11:%.*]] = alloca [1 x i8*], align 8
322 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
323 // CHECK1-NEXT:    [[A_CASTED12:%.*]] = alloca i64, align 8
324 // CHECK1-NEXT:    [[AA_CASTED14:%.*]] = alloca i64, align 8
325 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS16:%.*]] = alloca [2 x i8*], align 8
326 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS17:%.*]] = alloca [2 x i8*], align 8
327 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS18:%.*]] = alloca [2 x i8*], align 8
328 // CHECK1-NEXT:    [[_TMP19:%.*]] = alloca i32, align 4
329 // CHECK1-NEXT:    [[A_CASTED22:%.*]] = alloca i64, align 8
330 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS26:%.*]] = alloca [9 x i8*], align 8
331 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS27:%.*]] = alloca [9 x i8*], align 8
332 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS28:%.*]] = alloca [9 x i8*], align 8
333 // CHECK1-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8
334 // CHECK1-NEXT:    [[_TMP29:%.*]] = alloca i32, align 4
335 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
336 // CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
337 // CHECK1-NEXT:    store i32 0, i32* [[A]], align 4
338 // CHECK1-NEXT:    store i16 0, i16* [[AA]], align 2
339 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
340 // CHECK1-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
341 // CHECK1-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
342 // CHECK1-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
343 // CHECK1-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
344 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
345 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
346 // CHECK1-NEXT:    [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
347 // CHECK1-NEXT:    [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
348 // CHECK1-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
349 // CHECK1-NEXT:    store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
350 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
351 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_]], align 4
352 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
353 // CHECK1-NEXT:    store i32 [[TMP8]], i32* [[DOTCAPTURE_EXPR_2]], align 4
354 // CHECK1-NEXT:    [[TMP9:%.*]] = load i16, i16* [[AA]], align 2
355 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
356 // CHECK1-NEXT:    store i16 [[TMP9]], i16* [[CONV]], align 2
357 // CHECK1-NEXT:    [[TMP10:%.*]] = load i64, i64* [[AA_CASTED]], align 8
358 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
359 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
360 // CHECK1-NEXT:    store i32 [[TMP11]], i32* [[CONV3]], align 4
361 // CHECK1-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
362 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
363 // CHECK1-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED4]] to i32*
364 // CHECK1-NEXT:    store i32 [[TMP13]], i32* [[CONV5]], align 4
365 // CHECK1-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED4]], align 8
366 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
367 // CHECK1-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64*
368 // CHECK1-NEXT:    store i64 [[TMP10]], i64* [[TMP16]], align 8
369 // CHECK1-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
370 // CHECK1-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
371 // CHECK1-NEXT:    store i64 [[TMP10]], i64* [[TMP18]], align 8
372 // CHECK1-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
373 // CHECK1-NEXT:    store i8* null, i8** [[TMP19]], align 8
374 // CHECK1-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
375 // CHECK1-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64*
376 // CHECK1-NEXT:    store i64 [[TMP12]], i64* [[TMP21]], align 8
377 // CHECK1-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
378 // CHECK1-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64*
379 // CHECK1-NEXT:    store i64 [[TMP12]], i64* [[TMP23]], align 8
380 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
381 // CHECK1-NEXT:    store i8* null, i8** [[TMP24]], align 8
382 // CHECK1-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
383 // CHECK1-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64*
384 // CHECK1-NEXT:    store i64 [[TMP14]], i64* [[TMP26]], align 8
385 // CHECK1-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
386 // CHECK1-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64*
387 // CHECK1-NEXT:    store i64 [[TMP14]], i64* [[TMP28]], align 8
388 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
389 // CHECK1-NEXT:    store i8* null, i8** [[TMP29]], align 8
390 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
391 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
392 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
393 // CHECK1-NEXT:    [[TMP33:%.*]] = load i16, i16* [[AA]], align 2
394 // CHECK1-NEXT:    store i16 [[TMP33]], i16* [[TMP32]], align 4
395 // CHECK1-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
396 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
397 // CHECK1-NEXT:    store i32 [[TMP35]], i32* [[TMP34]], align 4
398 // CHECK1-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
399 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
400 // CHECK1-NEXT:    store i32 [[TMP37]], i32* [[TMP36]], align 4
401 // CHECK1-NEXT:    [[TMP38:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 120, i64 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
402 // CHECK1-NEXT:    [[TMP39:%.*]] = bitcast i8* [[TMP38]] to %struct.kmp_task_t_with_privates*
403 // CHECK1-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 0
404 // CHECK1-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP40]], i32 0, i32 0
405 // CHECK1-NEXT:    [[TMP42:%.*]] = load i8*, i8** [[TMP41]], align 8
406 // CHECK1-NEXT:    [[TMP43:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
407 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP42]], i8* align 4 [[TMP43]], i64 12, i1 false)
408 // CHECK1-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 1
409 // CHECK1-NEXT:    [[TMP45:%.*]] = bitcast i8* [[TMP42]] to %struct.anon*
410 // CHECK1-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 0
411 // CHECK1-NEXT:    [[TMP47:%.*]] = bitcast [3 x i8*]* [[TMP46]] to i8*
412 // CHECK1-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP30]] to i8*
413 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP47]], i8* align 8 [[TMP48]], i64 24, i1 false)
414 // CHECK1-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 1
415 // CHECK1-NEXT:    [[TMP50:%.*]] = bitcast [3 x i8*]* [[TMP49]] to i8*
416 // CHECK1-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP31]] to i8*
417 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP50]], i8* align 8 [[TMP51]], i64 24, i1 false)
418 // CHECK1-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 2
419 // CHECK1-NEXT:    [[TMP53:%.*]] = bitcast [3 x i64]* [[TMP52]] to i8*
420 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP53]], i8* align 8 bitcast ([3 x i64]* @.offload_sizes to i8*), i64 24, i1 false)
421 // CHECK1-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 3
422 // CHECK1-NEXT:    [[TMP55:%.*]] = load i16, i16* [[AA]], align 2
423 // CHECK1-NEXT:    store i16 [[TMP55]], i16* [[TMP54]], align 8
424 // CHECK1-NEXT:    [[TMP56:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP38]])
425 // CHECK1-NEXT:    [[TMP57:%.*]] = load i32, i32* [[A]], align 4
426 // CHECK1-NEXT:    [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
427 // CHECK1-NEXT:    store i32 [[TMP57]], i32* [[CONV6]], align 4
428 // CHECK1-NEXT:    [[TMP58:%.*]] = load i64, i64* [[A_CASTED]], align 8
429 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i64 [[TMP58]]) #[[ATTR4:[0-9]+]]
430 // CHECK1-NEXT:    [[TMP59:%.*]] = load i16, i16* [[AA]], align 2
431 // CHECK1-NEXT:    [[CONV8:%.*]] = bitcast i64* [[AA_CASTED7]] to i16*
432 // CHECK1-NEXT:    store i16 [[TMP59]], i16* [[CONV8]], align 2
433 // CHECK1-NEXT:    [[TMP60:%.*]] = load i64, i64* [[AA_CASTED7]], align 8
434 // CHECK1-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS9]], i32 0, i32 0
435 // CHECK1-NEXT:    [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64*
436 // CHECK1-NEXT:    store i64 [[TMP60]], i64* [[TMP62]], align 8
437 // CHECK1-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS10]], i32 0, i32 0
438 // CHECK1-NEXT:    [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64*
439 // CHECK1-NEXT:    store i64 [[TMP60]], i64* [[TMP64]], align 8
440 // CHECK1-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS11]], i64 0, i64 0
441 // CHECK1-NEXT:    store i8* null, i8** [[TMP65]], align 8
442 // CHECK1-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS9]], i32 0, i32 0
443 // CHECK1-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS10]], i32 0, i32 0
444 // CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
445 // CHECK1-NEXT:    [[TMP68:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111.region_id, i32 1, i8** [[TMP66]], i8** [[TMP67]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
446 // CHECK1-NEXT:    [[TMP69:%.*]] = icmp ne i32 [[TMP68]], 0
447 // CHECK1-NEXT:    br i1 [[TMP69]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
448 // CHECK1:       omp_offload.failed:
449 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111(i64 [[TMP60]]) #[[ATTR4]]
450 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
451 // CHECK1:       omp_offload.cont:
452 // CHECK1-NEXT:    [[TMP70:%.*]] = load i32, i32* [[A]], align 4
453 // CHECK1-NEXT:    [[CONV13:%.*]] = bitcast i64* [[A_CASTED12]] to i32*
454 // CHECK1-NEXT:    store i32 [[TMP70]], i32* [[CONV13]], align 4
455 // CHECK1-NEXT:    [[TMP71:%.*]] = load i64, i64* [[A_CASTED12]], align 8
456 // CHECK1-NEXT:    [[TMP72:%.*]] = load i16, i16* [[AA]], align 2
457 // CHECK1-NEXT:    [[CONV15:%.*]] = bitcast i64* [[AA_CASTED14]] to i16*
458 // CHECK1-NEXT:    store i16 [[TMP72]], i16* [[CONV15]], align 2
459 // CHECK1-NEXT:    [[TMP73:%.*]] = load i64, i64* [[AA_CASTED14]], align 8
460 // CHECK1-NEXT:    [[TMP74:%.*]] = load i32, i32* [[N_ADDR]], align 4
461 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP74]], 10
462 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
463 // CHECK1:       omp_if.then:
464 // CHECK1-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
465 // CHECK1-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
466 // CHECK1-NEXT:    store i64 [[TMP71]], i64* [[TMP76]], align 8
467 // CHECK1-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
468 // CHECK1-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64*
469 // CHECK1-NEXT:    store i64 [[TMP71]], i64* [[TMP78]], align 8
470 // CHECK1-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0
471 // CHECK1-NEXT:    store i8* null, i8** [[TMP79]], align 8
472 // CHECK1-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1
473 // CHECK1-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
474 // CHECK1-NEXT:    store i64 [[TMP73]], i64* [[TMP81]], align 8
475 // CHECK1-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1
476 // CHECK1-NEXT:    [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64*
477 // CHECK1-NEXT:    store i64 [[TMP73]], i64* [[TMP83]], align 8
478 // CHECK1-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1
479 // CHECK1-NEXT:    store i8* null, i8** [[TMP84]], align 8
480 // CHECK1-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
481 // CHECK1-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
482 // CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
483 // CHECK1-NEXT:    [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118.region_id, i32 2, i8** [[TMP85]], i8** [[TMP86]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.7, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
484 // CHECK1-NEXT:    [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0
485 // CHECK1-NEXT:    br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
486 // CHECK1:       omp_offload.failed20:
487 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i64 [[TMP71]], i64 [[TMP73]]) #[[ATTR4]]
488 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT21]]
489 // CHECK1:       omp_offload.cont21:
490 // CHECK1-NEXT:    br label [[OMP_IF_END:%.*]]
491 // CHECK1:       omp_if.else:
492 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i64 [[TMP71]], i64 [[TMP73]]) #[[ATTR4]]
493 // CHECK1-NEXT:    br label [[OMP_IF_END]]
494 // CHECK1:       omp_if.end:
495 // CHECK1-NEXT:    [[TMP89:%.*]] = load i32, i32* [[A]], align 4
496 // CHECK1-NEXT:    [[CONV23:%.*]] = bitcast i64* [[A_CASTED22]] to i32*
497 // CHECK1-NEXT:    store i32 [[TMP89]], i32* [[CONV23]], align 4
498 // CHECK1-NEXT:    [[TMP90:%.*]] = load i64, i64* [[A_CASTED22]], align 8
499 // CHECK1-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N_ADDR]], align 4
500 // CHECK1-NEXT:    [[CMP24:%.*]] = icmp sgt i32 [[TMP91]], 20
501 // CHECK1-NEXT:    br i1 [[CMP24]], label [[OMP_IF_THEN25:%.*]], label [[OMP_IF_ELSE32:%.*]]
502 // CHECK1:       omp_if.then25:
503 // CHECK1-NEXT:    [[TMP92:%.*]] = mul nuw i64 [[TMP2]], 4
504 // CHECK1-NEXT:    [[TMP93:%.*]] = mul nuw i64 5, [[TMP5]]
505 // CHECK1-NEXT:    [[TMP94:%.*]] = mul nuw i64 [[TMP93]], 8
506 // CHECK1-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0
507 // CHECK1-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i64*
508 // CHECK1-NEXT:    store i64 [[TMP90]], i64* [[TMP96]], align 8
509 // CHECK1-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0
510 // CHECK1-NEXT:    [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64*
511 // CHECK1-NEXT:    store i64 [[TMP90]], i64* [[TMP98]], align 8
512 // CHECK1-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
513 // CHECK1-NEXT:    store i64 4, i64* [[TMP99]], align 8
514 // CHECK1-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 0
515 // CHECK1-NEXT:    store i8* null, i8** [[TMP100]], align 8
516 // CHECK1-NEXT:    [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 1
517 // CHECK1-NEXT:    [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]**
518 // CHECK1-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 8
519 // CHECK1-NEXT:    [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 1
520 // CHECK1-NEXT:    [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]**
521 // CHECK1-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8
522 // CHECK1-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
523 // CHECK1-NEXT:    store i64 40, i64* [[TMP105]], align 8
524 // CHECK1-NEXT:    [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 1
525 // CHECK1-NEXT:    store i8* null, i8** [[TMP106]], align 8
526 // CHECK1-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 2
527 // CHECK1-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64*
528 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[TMP108]], align 8
529 // CHECK1-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 2
530 // CHECK1-NEXT:    [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i64*
531 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[TMP110]], align 8
532 // CHECK1-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
533 // CHECK1-NEXT:    store i64 8, i64* [[TMP111]], align 8
534 // CHECK1-NEXT:    [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 2
535 // CHECK1-NEXT:    store i8* null, i8** [[TMP112]], align 8
536 // CHECK1-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 3
537 // CHECK1-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float**
538 // CHECK1-NEXT:    store float* [[VLA]], float** [[TMP114]], align 8
539 // CHECK1-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 3
540 // CHECK1-NEXT:    [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float**
541 // CHECK1-NEXT:    store float* [[VLA]], float** [[TMP116]], align 8
542 // CHECK1-NEXT:    [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
543 // CHECK1-NEXT:    store i64 [[TMP92]], i64* [[TMP117]], align 8
544 // CHECK1-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 3
545 // CHECK1-NEXT:    store i8* null, i8** [[TMP118]], align 8
546 // CHECK1-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 4
547 // CHECK1-NEXT:    [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]**
548 // CHECK1-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 8
549 // CHECK1-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 4
550 // CHECK1-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]**
551 // CHECK1-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 8
552 // CHECK1-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
553 // CHECK1-NEXT:    store i64 400, i64* [[TMP123]], align 8
554 // CHECK1-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 4
555 // CHECK1-NEXT:    store i8* null, i8** [[TMP124]], align 8
556 // CHECK1-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 5
557 // CHECK1-NEXT:    [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64*
558 // CHECK1-NEXT:    store i64 5, i64* [[TMP126]], align 8
559 // CHECK1-NEXT:    [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 5
560 // CHECK1-NEXT:    [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64*
561 // CHECK1-NEXT:    store i64 5, i64* [[TMP128]], align 8
562 // CHECK1-NEXT:    [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
563 // CHECK1-NEXT:    store i64 8, i64* [[TMP129]], align 8
564 // CHECK1-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 5
565 // CHECK1-NEXT:    store i8* null, i8** [[TMP130]], align 8
566 // CHECK1-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 6
567 // CHECK1-NEXT:    [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64*
568 // CHECK1-NEXT:    store i64 [[TMP5]], i64* [[TMP132]], align 8
569 // CHECK1-NEXT:    [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 6
570 // CHECK1-NEXT:    [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i64*
571 // CHECK1-NEXT:    store i64 [[TMP5]], i64* [[TMP134]], align 8
572 // CHECK1-NEXT:    [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
573 // CHECK1-NEXT:    store i64 8, i64* [[TMP135]], align 8
574 // CHECK1-NEXT:    [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 6
575 // CHECK1-NEXT:    store i8* null, i8** [[TMP136]], align 8
576 // CHECK1-NEXT:    [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 7
577 // CHECK1-NEXT:    [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double**
578 // CHECK1-NEXT:    store double* [[VLA1]], double** [[TMP138]], align 8
579 // CHECK1-NEXT:    [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 7
580 // CHECK1-NEXT:    [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double**
581 // CHECK1-NEXT:    store double* [[VLA1]], double** [[TMP140]], align 8
582 // CHECK1-NEXT:    [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
583 // CHECK1-NEXT:    store i64 [[TMP94]], i64* [[TMP141]], align 8
584 // CHECK1-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 7
585 // CHECK1-NEXT:    store i8* null, i8** [[TMP142]], align 8
586 // CHECK1-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 8
587 // CHECK1-NEXT:    [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT**
588 // CHECK1-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 8
589 // CHECK1-NEXT:    [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 8
590 // CHECK1-NEXT:    [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT**
591 // CHECK1-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 8
592 // CHECK1-NEXT:    [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
593 // CHECK1-NEXT:    store i64 16, i64* [[TMP147]], align 8
594 // CHECK1-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 8
595 // CHECK1-NEXT:    store i8* null, i8** [[TMP148]], align 8
596 // CHECK1-NEXT:    [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0
597 // CHECK1-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0
598 // CHECK1-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
599 // CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
600 // CHECK1-NEXT:    [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
601 // CHECK1-NEXT:    [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0
602 // CHECK1-NEXT:    br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
603 // CHECK1:       omp_offload.failed30:
604 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
605 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
606 // CHECK1:       omp_offload.cont31:
607 // CHECK1-NEXT:    br label [[OMP_IF_END33:%.*]]
608 // CHECK1:       omp_if.else32:
609 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
610 // CHECK1-NEXT:    br label [[OMP_IF_END33]]
611 // CHECK1:       omp_if.end33:
612 // CHECK1-NEXT:    [[TMP154:%.*]] = load i32, i32* [[A]], align 4
613 // CHECK1-NEXT:    [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
614 // CHECK1-NEXT:    call void @llvm.stackrestore(i8* [[TMP155]])
615 // CHECK1-NEXT:    ret i32 [[TMP154]]
616 //
617 //
618 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
619 // CHECK1-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2:[0-9]+]] {
620 // CHECK1-NEXT:  entry:
621 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
622 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
623 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8
624 // CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
625 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
626 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
627 // CHECK1-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
628 // CHECK1-NEXT:    store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8
629 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
630 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
631 // CHECK1-NEXT:    [[CONV4:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32*
632 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV3]], align 4
633 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV4]], align 4
634 // CHECK1-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
635 // CHECK1-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
636 // CHECK1-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
637 // CHECK1-NEXT:    store i16 [[TMP3]], i16* [[CONV5]], align 2
638 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
639 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP4]])
640 // CHECK1-NEXT:    ret void
641 //
642 //
643 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
644 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3:[0-9]+]] {
645 // CHECK1-NEXT:  entry:
646 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
647 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
648 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
649 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
650 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
651 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
652 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
653 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
654 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
655 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
656 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
657 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
658 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
659 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
660 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
661 // CHECK1-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
662 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
663 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
664 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
665 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
666 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
667 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
668 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
669 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
670 // CHECK1:       cond.true:
671 // CHECK1-NEXT:    br label [[COND_END:%.*]]
672 // CHECK1:       cond.false:
673 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
674 // CHECK1-NEXT:    br label [[COND_END]]
675 // CHECK1:       cond.end:
676 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
677 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
678 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
679 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
680 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
681 // CHECK1:       omp.inner.for.cond:
682 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
683 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
684 // CHECK1-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
685 // CHECK1-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
686 // CHECK1:       omp.inner.for.body:
687 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
688 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
689 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
690 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10
691 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
692 // CHECK1:       omp.body.continue:
693 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
694 // CHECK1:       omp.inner.for.inc:
695 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
696 // CHECK1-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
697 // CHECK1-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
698 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
699 // CHECK1:       omp.inner.for.end:
700 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
701 // CHECK1:       omp.loop.exit:
702 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
703 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
704 // CHECK1-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
705 // CHECK1-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
706 // CHECK1:       .omp.final.then:
707 // CHECK1-NEXT:    store i32 10, i32* [[I]], align 4
708 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
709 // CHECK1:       .omp.final.done:
710 // CHECK1-NEXT:    ret void
711 //
712 //
713 // CHECK1-LABEL: define {{[^@]+}}@.omp_task_privates_map.
714 // CHECK1-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], i16** noalias noundef [[TMP1:%.*]], [3 x i8*]** noalias noundef [[TMP2:%.*]], [3 x i8*]** noalias noundef [[TMP3:%.*]], [3 x i64]** noalias noundef [[TMP4:%.*]]) #[[ATTR5:[0-9]+]] {
715 // CHECK1-NEXT:  entry:
716 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
717 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i16**, align 8
718 // CHECK1-NEXT:    [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 8
719 // CHECK1-NEXT:    [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 8
720 // CHECK1-NEXT:    [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 8
721 // CHECK1-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
722 // CHECK1-NEXT:    store i16** [[TMP1]], i16*** [[DOTADDR1]], align 8
723 // CHECK1-NEXT:    store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 8
724 // CHECK1-NEXT:    store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 8
725 // CHECK1-NEXT:    store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 8
726 // CHECK1-NEXT:    [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
727 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0
728 // CHECK1-NEXT:    [[TMP7:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 8
729 // CHECK1-NEXT:    store [3 x i8*]* [[TMP6]], [3 x i8*]** [[TMP7]], align 8
730 // CHECK1-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1
731 // CHECK1-NEXT:    [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 8
732 // CHECK1-NEXT:    store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 8
733 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2
734 // CHECK1-NEXT:    [[TMP11:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 8
735 // CHECK1-NEXT:    store [3 x i64]* [[TMP10]], [3 x i64]** [[TMP11]], align 8
736 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3
737 // CHECK1-NEXT:    [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 8
738 // CHECK1-NEXT:    store i16* [[TMP12]], i16** [[TMP13]], align 8
739 // CHECK1-NEXT:    ret void
740 //
741 //
742 // CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry.
743 // CHECK1-SAME: (i32 noundef signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR6:[0-9]+]] {
744 // CHECK1-NEXT:  entry:
745 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
746 // CHECK1-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
747 // CHECK1-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
748 // CHECK1-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
749 // CHECK1-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
750 // CHECK1-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
751 // CHECK1-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 8
752 // CHECK1-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 8
753 // CHECK1-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 8
754 // CHECK1-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 8
755 // CHECK1-NEXT:    [[AA_CASTED_I:%.*]] = alloca i64, align 8
756 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED_I:%.*]] = alloca i64, align 8
757 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED5_I:%.*]] = alloca i64, align 8
758 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
759 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
760 // CHECK1-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
761 // CHECK1-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
762 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
763 // CHECK1-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
764 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
765 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
766 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
767 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
768 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
769 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
770 // CHECK1-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
771 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
772 // CHECK1-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
773 // CHECK1-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
774 // CHECK1-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
775 // CHECK1-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]])
776 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26
777 // CHECK1-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !26
778 // CHECK1-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26
779 // CHECK1-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26
780 // CHECK1-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !26
781 // CHECK1-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !26
782 // CHECK1-NEXT:    [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !26
783 // CHECK1-NEXT:    [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26
784 // CHECK1-NEXT:    [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26
785 // CHECK1-NEXT:    [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)*
786 // CHECK1-NEXT:    call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
787 // CHECK1-NEXT:    [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !26
788 // CHECK1-NEXT:    [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !26
789 // CHECK1-NEXT:    [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !26
790 // CHECK1-NEXT:    [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !26
791 // CHECK1-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i64 0, i64 0
792 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i64 0, i64 0
793 // CHECK1-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i64 0, i64 0
794 // CHECK1-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
795 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
796 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[TMP23]], align 4
797 // CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) #[[ATTR4]]
798 // CHECK1-NEXT:    [[TMP26:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 [[TMP25]], i32 1, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
799 // CHECK1-NEXT:    [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
800 // CHECK1-NEXT:    br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
801 // CHECK1:       omp_offload.failed.i:
802 // CHECK1-NEXT:    [[TMP28:%.*]] = load i16, i16* [[TMP16]], align 2
803 // CHECK1-NEXT:    [[CONV_I:%.*]] = bitcast i64* [[AA_CASTED_I]] to i16*
804 // CHECK1-NEXT:    store i16 [[TMP28]], i16* [[CONV_I]], align 2, !noalias !26
805 // CHECK1-NEXT:    [[TMP29:%.*]] = load i64, i64* [[AA_CASTED_I]], align 8, !noalias !26
806 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP23]], align 4
807 // CHECK1-NEXT:    [[CONV4_I:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED_I]] to i32*
808 // CHECK1-NEXT:    store i32 [[TMP30]], i32* [[CONV4_I]], align 4, !noalias !26
809 // CHECK1-NEXT:    [[TMP31:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED_I]], align 8, !noalias !26
810 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP24]], align 4
811 // CHECK1-NEXT:    [[CONV6_I:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED5_I]] to i32*
812 // CHECK1-NEXT:    store i32 [[TMP32]], i32* [[CONV6_I]], align 4, !noalias !26
813 // CHECK1-NEXT:    [[TMP33:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED5_I]], align 8, !noalias !26
814 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97(i64 [[TMP29]], i64 [[TMP31]], i64 [[TMP33]]) #[[ATTR4]]
815 // CHECK1-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
816 // CHECK1:       .omp_outlined..1.exit:
817 // CHECK1-NEXT:    ret i32 0
818 //
819 //
820 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104
821 // CHECK1-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
822 // CHECK1-NEXT:  entry:
823 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
824 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
825 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
826 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
827 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
828 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
829 // CHECK1-NEXT:    store i32 [[TMP0]], i32* [[CONV1]], align 4
830 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
831 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]])
832 // CHECK1-NEXT:    ret void
833 //
834 //
835 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
836 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
837 // CHECK1-NEXT:  entry:
838 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
839 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
840 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
841 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
842 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
843 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
844 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
845 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
846 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
847 // CHECK1-NEXT:    [[A1:%.*]] = alloca i32, align 4
848 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
849 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
850 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
851 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
852 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
853 // CHECK1-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
854 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
855 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
856 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
857 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
858 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
859 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
860 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
861 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
862 // CHECK1:       cond.true:
863 // CHECK1-NEXT:    br label [[COND_END:%.*]]
864 // CHECK1:       cond.false:
865 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
866 // CHECK1-NEXT:    br label [[COND_END]]
867 // CHECK1:       cond.end:
868 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
869 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
870 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
871 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
872 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
873 // CHECK1:       omp.inner.for.cond:
874 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
875 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
876 // CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
877 // CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
878 // CHECK1:       omp.inner.for.body:
879 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
880 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
881 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
882 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[A1]], align 4
883 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A1]], align 4
884 // CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
885 // CHECK1-NEXT:    store i32 [[ADD3]], i32* [[A1]], align 4
886 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
887 // CHECK1:       omp.body.continue:
888 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
889 // CHECK1:       omp.inner.for.inc:
890 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
891 // CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1
892 // CHECK1-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
893 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
894 // CHECK1:       omp.inner.for.end:
895 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
896 // CHECK1:       omp.loop.exit:
897 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
898 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
899 // CHECK1-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
900 // CHECK1-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
901 // CHECK1:       .omp.final.then:
902 // CHECK1-NEXT:    store i32 10, i32* [[CONV]], align 4
903 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
904 // CHECK1:       .omp.final.done:
905 // CHECK1-NEXT:    ret void
906 //
907 //
908 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
909 // CHECK1-SAME: (i64 noundef [[AA:%.*]]) #[[ATTR2]] {
910 // CHECK1-NEXT:  entry:
911 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
912 // CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
913 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
914 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
915 // CHECK1-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
916 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
917 // CHECK1-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
918 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
919 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]])
920 // CHECK1-NEXT:    ret void
921 //
922 //
923 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
924 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3]] {
925 // CHECK1-NEXT:  entry:
926 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
927 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
928 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
929 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
930 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
931 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
932 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
933 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
934 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
935 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
936 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
937 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
938 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
939 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
940 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
941 // CHECK1-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
942 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
943 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
944 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
945 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
946 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
947 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
948 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
949 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
950 // CHECK1:       cond.true:
951 // CHECK1-NEXT:    br label [[COND_END:%.*]]
952 // CHECK1:       cond.false:
953 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
954 // CHECK1-NEXT:    br label [[COND_END]]
955 // CHECK1:       cond.end:
956 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
957 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
958 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
959 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
960 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
961 // CHECK1:       omp.inner.for.cond:
962 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
963 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29
964 // CHECK1-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
965 // CHECK1-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
966 // CHECK1:       omp.inner.for.body:
967 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
968 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
969 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
970 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !29
971 // CHECK1-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !29
972 // CHECK1-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
973 // CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
974 // CHECK1-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
975 // CHECK1-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !29
976 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
977 // CHECK1:       omp.body.continue:
978 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
979 // CHECK1:       omp.inner.for.inc:
980 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
981 // CHECK1-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
982 // CHECK1-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
983 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
984 // CHECK1:       omp.inner.for.end:
985 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
986 // CHECK1:       omp.loop.exit:
987 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
988 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
989 // CHECK1-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
990 // CHECK1-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
991 // CHECK1:       .omp.final.then:
992 // CHECK1-NEXT:    store i32 10, i32* [[I]], align 4
993 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
994 // CHECK1:       .omp.final.done:
995 // CHECK1-NEXT:    ret void
996 //
997 //
998 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
999 // CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR2]] {
1000 // CHECK1-NEXT:  entry:
1001 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1002 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
1003 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1004 // CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
1005 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1006 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
1007 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1008 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
1009 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
1010 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1011 // CHECK1-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
1012 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
1013 // CHECK1-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
1014 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
1015 // CHECK1-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
1016 // CHECK1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
1017 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
1018 // CHECK1-NEXT:    ret void
1019 //
1020 //
1021 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6
1022 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3]] {
1023 // CHECK1-NEXT:  entry:
1024 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1025 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1026 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1027 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
1028 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1029 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1030 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1031 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1032 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1033 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1034 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1035 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1036 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1037 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1038 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
1039 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1040 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
1041 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1042 // CHECK1-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
1043 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1044 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1045 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1046 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1047 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1048 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1049 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
1050 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1051 // CHECK1:       cond.true:
1052 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1053 // CHECK1:       cond.false:
1054 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1055 // CHECK1-NEXT:    br label [[COND_END]]
1056 // CHECK1:       cond.end:
1057 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
1058 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1059 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1060 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
1061 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1062 // CHECK1:       omp.inner.for.cond:
1063 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
1064 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
1065 // CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
1066 // CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1067 // CHECK1:       omp.inner.for.body:
1068 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
1069 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
1070 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1071 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !32
1072 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !32
1073 // CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
1074 // CHECK1-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !32
1075 // CHECK1-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !32
1076 // CHECK1-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
1077 // CHECK1-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
1078 // CHECK1-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
1079 // CHECK1-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !32
1080 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1081 // CHECK1:       omp.body.continue:
1082 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1083 // CHECK1:       omp.inner.for.inc:
1084 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
1085 // CHECK1-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
1086 // CHECK1-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
1087 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
1088 // CHECK1:       omp.inner.for.end:
1089 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1090 // CHECK1:       omp.loop.exit:
1091 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1092 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1093 // CHECK1-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
1094 // CHECK1-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1095 // CHECK1:       .omp.final.then:
1096 // CHECK1-NEXT:    store i32 10, i32* [[I]], align 4
1097 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1098 // CHECK1:       .omp.final.done:
1099 // CHECK1-NEXT:    ret void
1100 //
1101 //
1102 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
1103 // CHECK1-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR2]] {
1104 // CHECK1-NEXT:  entry:
1105 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1106 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
1107 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
1108 // CHECK1-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
1109 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
1110 // CHECK1-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
1111 // CHECK1-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
1112 // CHECK1-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
1113 // CHECK1-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
1114 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1115 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1116 // CHECK1-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
1117 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
1118 // CHECK1-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
1119 // CHECK1-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
1120 // CHECK1-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
1121 // CHECK1-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
1122 // CHECK1-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
1123 // CHECK1-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
1124 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1125 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
1126 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
1127 // CHECK1-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
1128 // CHECK1-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
1129 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
1130 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
1131 // CHECK1-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
1132 // CHECK1-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
1133 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
1134 // CHECK1-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1135 // CHECK1-NEXT:    store i32 [[TMP8]], i32* [[CONV5]], align 4
1136 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
1137 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
1138 // CHECK1-NEXT:    ret void
1139 //
1140 //
1141 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..9
1142 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR3]] {
1143 // CHECK1-NEXT:  entry:
1144 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1145 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1146 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1147 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
1148 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
1149 // CHECK1-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
1150 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
1151 // CHECK1-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
1152 // CHECK1-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
1153 // CHECK1-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
1154 // CHECK1-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
1155 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1156 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1157 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1158 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1159 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1160 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1161 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1162 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1163 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1164 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1165 // CHECK1-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
1166 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
1167 // CHECK1-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
1168 // CHECK1-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
1169 // CHECK1-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
1170 // CHECK1-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
1171 // CHECK1-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
1172 // CHECK1-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
1173 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1174 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
1175 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
1176 // CHECK1-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
1177 // CHECK1-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
1178 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
1179 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
1180 // CHECK1-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
1181 // CHECK1-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
1182 // CHECK1-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 0
1183 // CHECK1-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
1184 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1185 // CHECK1-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
1186 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1187 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1188 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1189 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
1190 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1191 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1192 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
1193 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1194 // CHECK1:       cond.true:
1195 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1196 // CHECK1:       cond.false:
1197 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1198 // CHECK1-NEXT:    br label [[COND_END]]
1199 // CHECK1:       cond.end:
1200 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
1201 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1202 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1203 // CHECK1-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
1204 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1205 // CHECK1:       omp.inner.for.cond:
1206 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
1207 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
1208 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
1209 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1210 // CHECK1:       omp.inner.for.body:
1211 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
1212 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
1213 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1214 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !35
1215 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !35
1216 // CHECK1-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
1217 // CHECK1-NEXT:    store i32 [[ADD6]], i32* [[CONV]], align 4, !llvm.access.group !35
1218 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
1219 // CHECK1-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !35
1220 // CHECK1-NEXT:    [[CONV7:%.*]] = fpext float [[TMP17]] to double
1221 // CHECK1-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
1222 // CHECK1-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
1223 // CHECK1-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !35
1224 // CHECK1-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
1225 // CHECK1-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !35
1226 // CHECK1-NEXT:    [[CONV11:%.*]] = fpext float [[TMP18]] to double
1227 // CHECK1-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
1228 // CHECK1-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
1229 // CHECK1-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !35
1230 // CHECK1-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
1231 // CHECK1-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i64 0, i64 2
1232 // CHECK1-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !35
1233 // CHECK1-NEXT:    [[ADD16:%.*]] = fadd double [[TMP19]], 1.000000e+00
1234 // CHECK1-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !35
1235 // CHECK1-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP5]]
1236 // CHECK1-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP20]]
1237 // CHECK1-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i64 3
1238 // CHECK1-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !35
1239 // CHECK1-NEXT:    [[ADD19:%.*]] = fadd double [[TMP21]], 1.000000e+00
1240 // CHECK1-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !35
1241 // CHECK1-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
1242 // CHECK1-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !35
1243 // CHECK1-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP22]], 1
1244 // CHECK1-NEXT:    store i64 [[ADD20]], i64* [[X]], align 8, !llvm.access.group !35
1245 // CHECK1-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
1246 // CHECK1-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !35
1247 // CHECK1-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP23]] to i32
1248 // CHECK1-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
1249 // CHECK1-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
1250 // CHECK1-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 8, !llvm.access.group !35
1251 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1252 // CHECK1:       omp.body.continue:
1253 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1254 // CHECK1:       omp.inner.for.inc:
1255 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
1256 // CHECK1-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP24]], 1
1257 // CHECK1-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
1258 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
1259 // CHECK1:       omp.inner.for.end:
1260 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1261 // CHECK1:       omp.loop.exit:
1262 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
1263 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1264 // CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
1265 // CHECK1-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1266 // CHECK1:       .omp.final.then:
1267 // CHECK1-NEXT:    store i32 10, i32* [[I]], align 4
1268 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1269 // CHECK1:       .omp.final.done:
1270 // CHECK1-NEXT:    ret void
1271 //
1272 //
1273 // CHECK1-LABEL: define {{[^@]+}}@_Z3bari
1274 // CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
1275 // CHECK1-NEXT:  entry:
1276 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1277 // CHECK1-NEXT:    [[A:%.*]] = alloca i32, align 4
1278 // CHECK1-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
1279 // CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1280 // CHECK1-NEXT:    store i32 0, i32* [[A]], align 4
1281 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
1282 // CHECK1-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
1283 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
1284 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
1285 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
1286 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
1287 // CHECK1-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef signext [[TMP2]])
1288 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
1289 // CHECK1-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
1290 // CHECK1-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
1291 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
1292 // CHECK1-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
1293 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
1294 // CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
1295 // CHECK1-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
1296 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
1297 // CHECK1-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
1298 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
1299 // CHECK1-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
1300 // CHECK1-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
1301 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
1302 // CHECK1-NEXT:    ret i32 [[TMP8]]
1303 //
1304 //
1305 // CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
1306 // CHECK1-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
1307 // CHECK1-NEXT:  entry:
1308 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
1309 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1310 // CHECK1-NEXT:    [[B:%.*]] = alloca i32, align 4
1311 // CHECK1-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
1312 // CHECK1-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
1313 // CHECK1-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
1314 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
1315 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
1316 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
1317 // CHECK1-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
1318 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1319 // CHECK1-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
1320 // CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1321 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
1322 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
1323 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
1324 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
1325 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
1326 // CHECK1-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
1327 // CHECK1-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
1328 // CHECK1-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
1329 // CHECK1-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
1330 // CHECK1-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
1331 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
1332 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B]], align 4
1333 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
1334 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[CONV]], align 4
1335 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
1336 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4
1337 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60
1338 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
1339 // CHECK1:       omp_if.then:
1340 // CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
1341 // CHECK1-NEXT:    [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]]
1342 // CHECK1-NEXT:    [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
1343 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1344 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
1345 // CHECK1-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 8
1346 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1347 // CHECK1-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
1348 // CHECK1-NEXT:    store double* [[A]], double** [[TMP13]], align 8
1349 // CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
1350 // CHECK1-NEXT:    store i64 8, i64* [[TMP14]], align 8
1351 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1352 // CHECK1-NEXT:    store i8* null, i8** [[TMP15]], align 8
1353 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1354 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
1355 // CHECK1-NEXT:    store i64 [[TMP6]], i64* [[TMP17]], align 8
1356 // CHECK1-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1357 // CHECK1-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64*
1358 // CHECK1-NEXT:    store i64 [[TMP6]], i64* [[TMP19]], align 8
1359 // CHECK1-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
1360 // CHECK1-NEXT:    store i64 4, i64* [[TMP20]], align 8
1361 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1362 // CHECK1-NEXT:    store i8* null, i8** [[TMP21]], align 8
1363 // CHECK1-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
1364 // CHECK1-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64*
1365 // CHECK1-NEXT:    store i64 2, i64* [[TMP23]], align 8
1366 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
1367 // CHECK1-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
1368 // CHECK1-NEXT:    store i64 2, i64* [[TMP25]], align 8
1369 // CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
1370 // CHECK1-NEXT:    store i64 8, i64* [[TMP26]], align 8
1371 // CHECK1-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
1372 // CHECK1-NEXT:    store i8* null, i8** [[TMP27]], align 8
1373 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
1374 // CHECK1-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
1375 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[TMP29]], align 8
1376 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
1377 // CHECK1-NEXT:    [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64*
1378 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[TMP31]], align 8
1379 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
1380 // CHECK1-NEXT:    store i64 8, i64* [[TMP32]], align 8
1381 // CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
1382 // CHECK1-NEXT:    store i8* null, i8** [[TMP33]], align 8
1383 // CHECK1-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
1384 // CHECK1-NEXT:    [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
1385 // CHECK1-NEXT:    store i16* [[VLA]], i16** [[TMP35]], align 8
1386 // CHECK1-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
1387 // CHECK1-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
1388 // CHECK1-NEXT:    store i16* [[VLA]], i16** [[TMP37]], align 8
1389 // CHECK1-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
1390 // CHECK1-NEXT:    store i64 [[TMP9]], i64* [[TMP38]], align 8
1391 // CHECK1-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
1392 // CHECK1-NEXT:    store i8* null, i8** [[TMP39]], align 8
1393 // CHECK1-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1394 // CHECK1-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1395 // CHECK1-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
1396 // CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
1397 // CHECK1-NEXT:    [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
1398 // CHECK1-NEXT:    [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
1399 // CHECK1-NEXT:    br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1400 // CHECK1:       omp_offload.failed:
1401 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]]
1402 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1403 // CHECK1:       omp_offload.cont:
1404 // CHECK1-NEXT:    br label [[OMP_IF_END:%.*]]
1405 // CHECK1:       omp_if.else:
1406 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]]
1407 // CHECK1-NEXT:    br label [[OMP_IF_END]]
1408 // CHECK1:       omp_if.end:
1409 // CHECK1-NEXT:    [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]]
1410 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]]
1411 // CHECK1-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
1412 // CHECK1-NEXT:    [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
1413 // CHECK1-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP46]] to i32
1414 // CHECK1-NEXT:    [[TMP47:%.*]] = load i32, i32* [[B]], align 4
1415 // CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]]
1416 // CHECK1-NEXT:    [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
1417 // CHECK1-NEXT:    call void @llvm.stackrestore(i8* [[TMP48]])
1418 // CHECK1-NEXT:    ret i32 [[ADD4]]
1419 //
1420 //
1421 // CHECK1-LABEL: define {{[^@]+}}@_ZL7fstatici
1422 // CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
1423 // CHECK1-NEXT:  entry:
1424 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1425 // CHECK1-NEXT:    [[A:%.*]] = alloca i32, align 4
1426 // CHECK1-NEXT:    [[AA:%.*]] = alloca i16, align 2
1427 // CHECK1-NEXT:    [[AAA:%.*]] = alloca i8, align 1
1428 // CHECK1-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
1429 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1430 // CHECK1-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
1431 // CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
1432 // CHECK1-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
1433 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
1434 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
1435 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
1436 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1437 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1438 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
1439 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
1440 // CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1441 // CHECK1-NEXT:    store i32 0, i32* [[A]], align 4
1442 // CHECK1-NEXT:    store i16 0, i16* [[AA]], align 2
1443 // CHECK1-NEXT:    store i8 0, i8* [[AAA]], align 1
1444 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
1445 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1446 // CHECK1-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
1447 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
1448 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
1449 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
1450 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
1451 // CHECK1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
1452 // CHECK1-NEXT:    [[TMP4:%.*]] = load i16, i16* [[AA]], align 2
1453 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
1454 // CHECK1-NEXT:    store i16 [[TMP4]], i16* [[CONV2]], align 2
1455 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[AA_CASTED]], align 8
1456 // CHECK1-NEXT:    [[TMP6:%.*]] = load i8, i8* [[AAA]], align 1
1457 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
1458 // CHECK1-NEXT:    store i8 [[TMP6]], i8* [[CONV3]], align 1
1459 // CHECK1-NEXT:    [[TMP7:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
1460 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[N_ADDR]], align 4
1461 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 50
1462 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
1463 // CHECK1:       omp_if.then:
1464 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1465 // CHECK1-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
1466 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[TMP10]], align 8
1467 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1468 // CHECK1-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64*
1469 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[TMP12]], align 8
1470 // CHECK1-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1471 // CHECK1-NEXT:    store i8* null, i8** [[TMP13]], align 8
1472 // CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1473 // CHECK1-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
1474 // CHECK1-NEXT:    store i64 [[TMP3]], i64* [[TMP15]], align 8
1475 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1476 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
1477 // CHECK1-NEXT:    store i64 [[TMP3]], i64* [[TMP17]], align 8
1478 // CHECK1-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1479 // CHECK1-NEXT:    store i8* null, i8** [[TMP18]], align 8
1480 // CHECK1-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
1481 // CHECK1-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
1482 // CHECK1-NEXT:    store i64 [[TMP5]], i64* [[TMP20]], align 8
1483 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
1484 // CHECK1-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
1485 // CHECK1-NEXT:    store i64 [[TMP5]], i64* [[TMP22]], align 8
1486 // CHECK1-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
1487 // CHECK1-NEXT:    store i8* null, i8** [[TMP23]], align 8
1488 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
1489 // CHECK1-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
1490 // CHECK1-NEXT:    store i64 [[TMP7]], i64* [[TMP25]], align 8
1491 // CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
1492 // CHECK1-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
1493 // CHECK1-NEXT:    store i64 [[TMP7]], i64* [[TMP27]], align 8
1494 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
1495 // CHECK1-NEXT:    store i8* null, i8** [[TMP28]], align 8
1496 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
1497 // CHECK1-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [10 x i32]**
1498 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP30]], align 8
1499 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
1500 // CHECK1-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to [10 x i32]**
1501 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP32]], align 8
1502 // CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
1503 // CHECK1-NEXT:    store i8* null, i8** [[TMP33]], align 8
1504 // CHECK1-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1505 // CHECK1-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1506 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A]], align 4
1507 // CHECK1-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_]], align 4
1508 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[N_ADDR]], align 4
1509 // CHECK1-NEXT:    store i32 [[TMP37]], i32* [[DOTCAPTURE_EXPR_4]], align 4
1510 // CHECK1-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
1511 // CHECK1-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1512 // CHECK1-NEXT:    [[SUB:%.*]] = sub i32 [[TMP38]], [[TMP39]]
1513 // CHECK1-NEXT:    [[SUB6:%.*]] = sub i32 [[SUB]], 1
1514 // CHECK1-NEXT:    [[ADD:%.*]] = add i32 [[SUB6]], 1
1515 // CHECK1-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
1516 // CHECK1-NEXT:    [[SUB7:%.*]] = sub i32 [[DIV]], 1
1517 // CHECK1-NEXT:    store i32 [[SUB7]], i32* [[DOTCAPTURE_EXPR_5]], align 4
1518 // CHECK1-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
1519 // CHECK1-NEXT:    [[ADD8:%.*]] = add i32 [[TMP40]], 1
1520 // CHECK1-NEXT:    [[TMP41:%.*]] = zext i32 [[ADD8]] to i64
1521 // CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]])
1522 // CHECK1-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
1523 // CHECK1-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
1524 // CHECK1-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1525 // CHECK1:       omp_offload.failed:
1526 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], i64 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
1527 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1528 // CHECK1:       omp_offload.cont:
1529 // CHECK1-NEXT:    br label [[OMP_IF_END:%.*]]
1530 // CHECK1:       omp_if.else:
1531 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], i64 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
1532 // CHECK1-NEXT:    br label [[OMP_IF_END]]
1533 // CHECK1:       omp_if.end:
1534 // CHECK1-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
1535 // CHECK1-NEXT:    ret i32 [[TMP44]]
1536 //
1537 //
1538 // CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
1539 // CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
1540 // CHECK1-NEXT:  entry:
1541 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1542 // CHECK1-NEXT:    [[A:%.*]] = alloca i32, align 4
1543 // CHECK1-NEXT:    [[AA:%.*]] = alloca i16, align 2
1544 // CHECK1-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
1545 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1546 // CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
1547 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
1548 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
1549 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
1550 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1551 // CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1552 // CHECK1-NEXT:    store i32 0, i32* [[A]], align 4
1553 // CHECK1-NEXT:    store i16 0, i16* [[AA]], align 2
1554 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
1555 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1556 // CHECK1-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
1557 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
1558 // CHECK1-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
1559 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
1560 // CHECK1-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
1561 // CHECK1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
1562 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
1563 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
1564 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
1565 // CHECK1:       omp_if.then:
1566 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1567 // CHECK1-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
1568 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
1569 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1570 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
1571 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
1572 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1573 // CHECK1-NEXT:    store i8* null, i8** [[TMP9]], align 8
1574 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1575 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
1576 // CHECK1-NEXT:    store i64 [[TMP3]], i64* [[TMP11]], align 8
1577 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1578 // CHECK1-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
1579 // CHECK1-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
1580 // CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1581 // CHECK1-NEXT:    store i8* null, i8** [[TMP14]], align 8
1582 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
1583 // CHECK1-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
1584 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
1585 // CHECK1-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
1586 // CHECK1-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
1587 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
1588 // CHECK1-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
1589 // CHECK1-NEXT:    store i8* null, i8** [[TMP19]], align 8
1590 // CHECK1-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1591 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1592 // CHECK1-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
1593 // CHECK1-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
1594 // CHECK1-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
1595 // CHECK1-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1596 // CHECK1:       omp_offload.failed:
1597 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
1598 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1599 // CHECK1:       omp_offload.cont:
1600 // CHECK1-NEXT:    br label [[OMP_IF_END:%.*]]
1601 // CHECK1:       omp_if.else:
1602 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
1603 // CHECK1-NEXT:    br label [[OMP_IF_END]]
1604 // CHECK1:       omp_if.end:
1605 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
1606 // CHECK1-NEXT:    ret i32 [[TMP24]]
1607 //
1608 //
1609 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
1610 // CHECK1-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
1611 // CHECK1-NEXT:  entry:
1612 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
1613 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
1614 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
1615 // CHECK1-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
1616 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
1617 // CHECK1-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
1618 // CHECK1-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
1619 // CHECK1-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
1620 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
1621 // CHECK1-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
1622 // CHECK1-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
1623 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
1624 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
1625 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
1626 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
1627 // CHECK1-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
1628 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
1629 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
1630 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
1631 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
1632 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
1633 // CHECK1-NEXT:    ret void
1634 //
1635 //
1636 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11
1637 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
1638 // CHECK1-NEXT:  entry:
1639 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1640 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1641 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
1642 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
1643 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
1644 // CHECK1-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
1645 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
1646 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1647 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1648 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1649 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1650 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1651 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1652 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1653 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1654 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1655 // CHECK1-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
1656 // CHECK1-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
1657 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
1658 // CHECK1-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
1659 // CHECK1-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
1660 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
1661 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
1662 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
1663 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
1664 // CHECK1-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
1665 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1666 // CHECK1-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
1667 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1668 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1669 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1670 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
1671 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1672 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1673 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
1674 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1675 // CHECK1:       cond.true:
1676 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1677 // CHECK1:       cond.false:
1678 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1679 // CHECK1-NEXT:    br label [[COND_END]]
1680 // CHECK1:       cond.end:
1681 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
1682 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1683 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1684 // CHECK1-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
1685 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1686 // CHECK1:       omp.inner.for.cond:
1687 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
1688 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !38
1689 // CHECK1-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
1690 // CHECK1-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1691 // CHECK1:       omp.inner.for.body:
1692 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
1693 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
1694 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1695 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !38
1696 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !38
1697 // CHECK1-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double
1698 // CHECK1-NEXT:    [[ADD5:%.*]] = fadd double [[CONV4]], 1.500000e+00
1699 // CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
1700 // CHECK1-NEXT:    store double [[ADD5]], double* [[A]], align 8, !llvm.access.group !38
1701 // CHECK1-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
1702 // CHECK1-NEXT:    [[TMP13:%.*]] = load double, double* [[A6]], align 8, !llvm.access.group !38
1703 // CHECK1-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
1704 // CHECK1-NEXT:    store double [[INC]], double* [[A6]], align 8, !llvm.access.group !38
1705 // CHECK1-NEXT:    [[CONV7:%.*]] = fptosi double [[INC]] to i16
1706 // CHECK1-NEXT:    [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]]
1707 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]]
1708 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
1709 // CHECK1-NEXT:    store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !38
1710 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1711 // CHECK1:       omp.body.continue:
1712 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1713 // CHECK1:       omp.inner.for.inc:
1714 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
1715 // CHECK1-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP15]], 1
1716 // CHECK1-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
1717 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
1718 // CHECK1:       omp.inner.for.end:
1719 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1720 // CHECK1:       omp.loop.exit:
1721 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
1722 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1723 // CHECK1-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
1724 // CHECK1-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1725 // CHECK1:       .omp.final.then:
1726 // CHECK1-NEXT:    store i32 10, i32* [[I]], align 4
1727 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1728 // CHECK1:       .omp.final.done:
1729 // CHECK1-NEXT:    ret void
1730 //
1731 //
1732 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
1733 // CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
1734 // CHECK1-NEXT:  entry:
1735 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1736 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
1737 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
1738 // CHECK1-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
1739 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
1740 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1741 // CHECK1-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
1742 // CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
1743 // CHECK1-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
1744 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1745 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
1746 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
1747 // CHECK1-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
1748 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
1749 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1750 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1751 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
1752 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
1753 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
1754 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
1755 // CHECK1-NEXT:    [[CONV4:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1756 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[CONV4]], align 4
1757 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
1758 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV1]], align 4
1759 // CHECK1-NEXT:    [[CONV5:%.*]] = bitcast i64* [[N_CASTED]] to i32*
1760 // CHECK1-NEXT:    store i32 [[TMP3]], i32* [[CONV5]], align 4
1761 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
1762 // CHECK1-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV2]], align 2
1763 // CHECK1-NEXT:    [[CONV6:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
1764 // CHECK1-NEXT:    store i16 [[TMP5]], i16* [[CONV6]], align 2
1765 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AA_CASTED]], align 8
1766 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 1
1767 // CHECK1-NEXT:    [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
1768 // CHECK1-NEXT:    store i8 [[TMP7]], i8* [[CONV7]], align 1
1769 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
1770 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]])
1771 // CHECK1-NEXT:    ret void
1772 //
1773 //
1774 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..13
1775 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
1776 // CHECK1-NEXT:  entry:
1777 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1778 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1779 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1780 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
1781 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
1782 // CHECK1-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
1783 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
1784 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1785 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1786 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1787 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
1788 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
1789 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1790 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1791 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1792 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1793 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1794 // CHECK1-NEXT:    [[I8:%.*]] = alloca i32, align 4
1795 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1796 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1797 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1798 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
1799 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
1800 // CHECK1-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
1801 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
1802 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1803 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1804 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
1805 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
1806 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
1807 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
1808 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
1809 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
1810 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_4]], align 4
1811 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
1812 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1813 // CHECK1-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
1814 // CHECK1-NEXT:    [[SUB6:%.*]] = sub i32 [[SUB]], 1
1815 // CHECK1-NEXT:    [[ADD:%.*]] = add i32 [[SUB6]], 1
1816 // CHECK1-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
1817 // CHECK1-NEXT:    [[SUB7:%.*]] = sub i32 [[DIV]], 1
1818 // CHECK1-NEXT:    store i32 [[SUB7]], i32* [[DOTCAPTURE_EXPR_5]], align 4
1819 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1820 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
1821 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1822 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
1823 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
1824 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1825 // CHECK1:       omp.precond.then:
1826 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1827 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
1828 // CHECK1-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
1829 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1830 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1831 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1832 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
1833 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1834 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1835 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
1836 // CHECK1-NEXT:    [[CMP9:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
1837 // CHECK1-NEXT:    br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1838 // CHECK1:       cond.true:
1839 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
1840 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1841 // CHECK1:       cond.false:
1842 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1843 // CHECK1-NEXT:    br label [[COND_END]]
1844 // CHECK1:       cond.end:
1845 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
1846 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1847 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1848 // CHECK1-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
1849 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1850 // CHECK1:       omp.inner.for.cond:
1851 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
1852 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41
1853 // CHECK1-NEXT:    [[ADD10:%.*]] = add i32 [[TMP17]], 1
1854 // CHECK1-NEXT:    [[CMP11:%.*]] = icmp ult i32 [[TMP16]], [[ADD10]]
1855 // CHECK1-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1856 // CHECK1:       omp.inner.for.body:
1857 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !41
1858 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
1859 // CHECK1-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
1860 // CHECK1-NEXT:    [[ADD12:%.*]] = add i32 [[TMP18]], [[MUL]]
1861 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[I8]], align 4, !llvm.access.group !41
1862 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !41
1863 // CHECK1-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP20]], 1
1864 // CHECK1-NEXT:    store i32 [[ADD13]], i32* [[CONV]], align 4, !llvm.access.group !41
1865 // CHECK1-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV2]], align 2, !llvm.access.group !41
1866 // CHECK1-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP21]] to i32
1867 // CHECK1-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
1868 // CHECK1-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
1869 // CHECK1-NEXT:    store i16 [[CONV16]], i16* [[CONV2]], align 2, !llvm.access.group !41
1870 // CHECK1-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV3]], align 1, !llvm.access.group !41
1871 // CHECK1-NEXT:    [[CONV17:%.*]] = sext i8 [[TMP22]] to i32
1872 // CHECK1-NEXT:    [[ADD18:%.*]] = add nsw i32 [[CONV17]], 1
1873 // CHECK1-NEXT:    [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
1874 // CHECK1-NEXT:    store i8 [[CONV19]], i8* [[CONV3]], align 1, !llvm.access.group !41
1875 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
1876 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !41
1877 // CHECK1-NEXT:    [[ADD20:%.*]] = add nsw i32 [[TMP23]], 1
1878 // CHECK1-NEXT:    store i32 [[ADD20]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !41
1879 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1880 // CHECK1:       omp.body.continue:
1881 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1882 // CHECK1:       omp.inner.for.inc:
1883 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
1884 // CHECK1-NEXT:    [[ADD21:%.*]] = add i32 [[TMP24]], 1
1885 // CHECK1-NEXT:    store i32 [[ADD21]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
1886 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
1887 // CHECK1:       omp.inner.for.end:
1888 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1889 // CHECK1:       omp.loop.exit:
1890 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1891 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
1892 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
1893 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1894 // CHECK1-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
1895 // CHECK1-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1896 // CHECK1:       .omp.final.then:
1897 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1898 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
1899 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1900 // CHECK1-NEXT:    [[SUB22:%.*]] = sub i32 [[TMP30]], [[TMP31]]
1901 // CHECK1-NEXT:    [[SUB23:%.*]] = sub i32 [[SUB22]], 1
1902 // CHECK1-NEXT:    [[ADD24:%.*]] = add i32 [[SUB23]], 1
1903 // CHECK1-NEXT:    [[DIV25:%.*]] = udiv i32 [[ADD24]], 1
1904 // CHECK1-NEXT:    [[MUL26:%.*]] = mul i32 [[DIV25]], 1
1905 // CHECK1-NEXT:    [[ADD27:%.*]] = add i32 [[TMP29]], [[MUL26]]
1906 // CHECK1-NEXT:    store i32 [[ADD27]], i32* [[I8]], align 4
1907 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1908 // CHECK1:       .omp.final.done:
1909 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1910 // CHECK1:       omp.precond.end:
1911 // CHECK1-NEXT:    ret void
1912 //
1913 //
1914 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
1915 // CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
1916 // CHECK1-NEXT:  entry:
1917 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1918 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
1919 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
1920 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1921 // CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
1922 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1923 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
1924 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
1925 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1926 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
1927 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
1928 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
1929 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1930 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
1931 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
1932 // CHECK1-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
1933 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
1934 // CHECK1-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
1935 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
1936 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
1937 // CHECK1-NEXT:    ret void
1938 //
1939 //
1940 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..16
1941 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
1942 // CHECK1-NEXT:  entry:
1943 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1944 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1945 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1946 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
1947 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
1948 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1949 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1950 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1951 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1952 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1953 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1954 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1955 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1956 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1957 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1958 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
1959 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
1960 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1961 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
1962 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
1963 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1964 // CHECK1-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
1965 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1966 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1967 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1968 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1969 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1970 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1971 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
1972 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1973 // CHECK1:       cond.true:
1974 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1975 // CHECK1:       cond.false:
1976 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1977 // CHECK1-NEXT:    br label [[COND_END]]
1978 // CHECK1:       cond.end:
1979 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1980 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1981 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1982 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
1983 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1984 // CHECK1:       omp.inner.for.cond:
1985 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
1986 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !44
1987 // CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1988 // CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1989 // CHECK1:       omp.inner.for.body:
1990 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
1991 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
1992 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1993 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !44
1994 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !44
1995 // CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
1996 // CHECK1-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !44
1997 // CHECK1-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !44
1998 // CHECK1-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
1999 // CHECK1-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
2000 // CHECK1-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
2001 // CHECK1-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !44
2002 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
2003 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
2004 // CHECK1-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
2005 // CHECK1-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
2006 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2007 // CHECK1:       omp.body.continue:
2008 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2009 // CHECK1:       omp.inner.for.inc:
2010 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
2011 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP12]], 1
2012 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
2013 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
2014 // CHECK1:       omp.inner.for.end:
2015 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2016 // CHECK1:       omp.loop.exit:
2017 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2018 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2019 // CHECK1-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
2020 // CHECK1-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2021 // CHECK1:       .omp.final.then:
2022 // CHECK1-NEXT:    store i32 10, i32* [[I]], align 4
2023 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2024 // CHECK1:       .omp.final.done:
2025 // CHECK1-NEXT:    ret void
2026 //
2027 //
2028 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2029 // CHECK1-SAME: () #[[ATTR5]] {
2030 // CHECK1-NEXT:  entry:
2031 // CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
2032 // CHECK1-NEXT:    ret void
2033 //
2034 //
2035 // CHECK2-LABEL: define {{[^@]+}}@_Z3fooi
2036 // CHECK2-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
2037 // CHECK2-NEXT:  entry:
2038 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
2039 // CHECK2-NEXT:    [[A:%.*]] = alloca i32, align 4
2040 // CHECK2-NEXT:    [[AA:%.*]] = alloca i16, align 2
2041 // CHECK2-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
2042 // CHECK2-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
2043 // CHECK2-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
2044 // CHECK2-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
2045 // CHECK2-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
2046 // CHECK2-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
2047 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2048 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2049 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
2050 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
2051 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__CASTED4:%.*]] = alloca i64, align 8
2052 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
2053 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
2054 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
2055 // CHECK2-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
2056 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
2057 // CHECK2-NEXT:    [[AA_CASTED7:%.*]] = alloca i64, align 8
2058 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS9:%.*]] = alloca [1 x i8*], align 8
2059 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS10:%.*]] = alloca [1 x i8*], align 8
2060 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS11:%.*]] = alloca [1 x i8*], align 8
2061 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2062 // CHECK2-NEXT:    [[A_CASTED12:%.*]] = alloca i64, align 8
2063 // CHECK2-NEXT:    [[AA_CASTED14:%.*]] = alloca i64, align 8
2064 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS16:%.*]] = alloca [2 x i8*], align 8
2065 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS17:%.*]] = alloca [2 x i8*], align 8
2066 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS18:%.*]] = alloca [2 x i8*], align 8
2067 // CHECK2-NEXT:    [[_TMP19:%.*]] = alloca i32, align 4
2068 // CHECK2-NEXT:    [[A_CASTED22:%.*]] = alloca i64, align 8
2069 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS26:%.*]] = alloca [9 x i8*], align 8
2070 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS27:%.*]] = alloca [9 x i8*], align 8
2071 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS28:%.*]] = alloca [9 x i8*], align 8
2072 // CHECK2-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8
2073 // CHECK2-NEXT:    [[_TMP29:%.*]] = alloca i32, align 4
2074 // CHECK2-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
2075 // CHECK2-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
2076 // CHECK2-NEXT:    store i32 0, i32* [[A]], align 4
2077 // CHECK2-NEXT:    store i16 0, i16* [[AA]], align 2
2078 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
2079 // CHECK2-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
2080 // CHECK2-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
2081 // CHECK2-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
2082 // CHECK2-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
2083 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
2084 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
2085 // CHECK2-NEXT:    [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
2086 // CHECK2-NEXT:    [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
2087 // CHECK2-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
2088 // CHECK2-NEXT:    store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
2089 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
2090 // CHECK2-NEXT:    store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_]], align 4
2091 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
2092 // CHECK2-NEXT:    store i32 [[TMP8]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2093 // CHECK2-NEXT:    [[TMP9:%.*]] = load i16, i16* [[AA]], align 2
2094 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
2095 // CHECK2-NEXT:    store i16 [[TMP9]], i16* [[CONV]], align 2
2096 // CHECK2-NEXT:    [[TMP10:%.*]] = load i64, i64* [[AA_CASTED]], align 8
2097 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2098 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
2099 // CHECK2-NEXT:    store i32 [[TMP11]], i32* [[CONV3]], align 4
2100 // CHECK2-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
2101 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2102 // CHECK2-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED4]] to i32*
2103 // CHECK2-NEXT:    store i32 [[TMP13]], i32* [[CONV5]], align 4
2104 // CHECK2-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED4]], align 8
2105 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2106 // CHECK2-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64*
2107 // CHECK2-NEXT:    store i64 [[TMP10]], i64* [[TMP16]], align 8
2108 // CHECK2-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2109 // CHECK2-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
2110 // CHECK2-NEXT:    store i64 [[TMP10]], i64* [[TMP18]], align 8
2111 // CHECK2-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
2112 // CHECK2-NEXT:    store i8* null, i8** [[TMP19]], align 8
2113 // CHECK2-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
2114 // CHECK2-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64*
2115 // CHECK2-NEXT:    store i64 [[TMP12]], i64* [[TMP21]], align 8
2116 // CHECK2-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
2117 // CHECK2-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64*
2118 // CHECK2-NEXT:    store i64 [[TMP12]], i64* [[TMP23]], align 8
2119 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
2120 // CHECK2-NEXT:    store i8* null, i8** [[TMP24]], align 8
2121 // CHECK2-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
2122 // CHECK2-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64*
2123 // CHECK2-NEXT:    store i64 [[TMP14]], i64* [[TMP26]], align 8
2124 // CHECK2-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
2125 // CHECK2-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64*
2126 // CHECK2-NEXT:    store i64 [[TMP14]], i64* [[TMP28]], align 8
2127 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
2128 // CHECK2-NEXT:    store i8* null, i8** [[TMP29]], align 8
2129 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2130 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2131 // CHECK2-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
2132 // CHECK2-NEXT:    [[TMP33:%.*]] = load i16, i16* [[AA]], align 2
2133 // CHECK2-NEXT:    store i16 [[TMP33]], i16* [[TMP32]], align 4
2134 // CHECK2-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
2135 // CHECK2-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2136 // CHECK2-NEXT:    store i32 [[TMP35]], i32* [[TMP34]], align 4
2137 // CHECK2-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
2138 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2139 // CHECK2-NEXT:    store i32 [[TMP37]], i32* [[TMP36]], align 4
2140 // CHECK2-NEXT:    [[TMP38:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 120, i64 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
2141 // CHECK2-NEXT:    [[TMP39:%.*]] = bitcast i8* [[TMP38]] to %struct.kmp_task_t_with_privates*
2142 // CHECK2-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 0
2143 // CHECK2-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP40]], i32 0, i32 0
2144 // CHECK2-NEXT:    [[TMP42:%.*]] = load i8*, i8** [[TMP41]], align 8
2145 // CHECK2-NEXT:    [[TMP43:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
2146 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP42]], i8* align 4 [[TMP43]], i64 12, i1 false)
2147 // CHECK2-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 1
2148 // CHECK2-NEXT:    [[TMP45:%.*]] = bitcast i8* [[TMP42]] to %struct.anon*
2149 // CHECK2-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 0
2150 // CHECK2-NEXT:    [[TMP47:%.*]] = bitcast [3 x i8*]* [[TMP46]] to i8*
2151 // CHECK2-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP30]] to i8*
2152 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP47]], i8* align 8 [[TMP48]], i64 24, i1 false)
2153 // CHECK2-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 1
2154 // CHECK2-NEXT:    [[TMP50:%.*]] = bitcast [3 x i8*]* [[TMP49]] to i8*
2155 // CHECK2-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP31]] to i8*
2156 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP50]], i8* align 8 [[TMP51]], i64 24, i1 false)
2157 // CHECK2-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 2
2158 // CHECK2-NEXT:    [[TMP53:%.*]] = bitcast [3 x i64]* [[TMP52]] to i8*
2159 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP53]], i8* align 8 bitcast ([3 x i64]* @.offload_sizes to i8*), i64 24, i1 false)
2160 // CHECK2-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 3
2161 // CHECK2-NEXT:    [[TMP55:%.*]] = load i16, i16* [[AA]], align 2
2162 // CHECK2-NEXT:    store i16 [[TMP55]], i16* [[TMP54]], align 8
2163 // CHECK2-NEXT:    [[TMP56:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP38]])
2164 // CHECK2-NEXT:    [[TMP57:%.*]] = load i32, i32* [[A]], align 4
2165 // CHECK2-NEXT:    [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
2166 // CHECK2-NEXT:    store i32 [[TMP57]], i32* [[CONV6]], align 4
2167 // CHECK2-NEXT:    [[TMP58:%.*]] = load i64, i64* [[A_CASTED]], align 8
2168 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i64 [[TMP58]]) #[[ATTR4:[0-9]+]]
2169 // CHECK2-NEXT:    [[TMP59:%.*]] = load i16, i16* [[AA]], align 2
2170 // CHECK2-NEXT:    [[CONV8:%.*]] = bitcast i64* [[AA_CASTED7]] to i16*
2171 // CHECK2-NEXT:    store i16 [[TMP59]], i16* [[CONV8]], align 2
2172 // CHECK2-NEXT:    [[TMP60:%.*]] = load i64, i64* [[AA_CASTED7]], align 8
2173 // CHECK2-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS9]], i32 0, i32 0
2174 // CHECK2-NEXT:    [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64*
2175 // CHECK2-NEXT:    store i64 [[TMP60]], i64* [[TMP62]], align 8
2176 // CHECK2-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS10]], i32 0, i32 0
2177 // CHECK2-NEXT:    [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64*
2178 // CHECK2-NEXT:    store i64 [[TMP60]], i64* [[TMP64]], align 8
2179 // CHECK2-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS11]], i64 0, i64 0
2180 // CHECK2-NEXT:    store i8* null, i8** [[TMP65]], align 8
2181 // CHECK2-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS9]], i32 0, i32 0
2182 // CHECK2-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS10]], i32 0, i32 0
2183 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
2184 // CHECK2-NEXT:    [[TMP68:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111.region_id, i32 1, i8** [[TMP66]], i8** [[TMP67]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
2185 // CHECK2-NEXT:    [[TMP69:%.*]] = icmp ne i32 [[TMP68]], 0
2186 // CHECK2-NEXT:    br i1 [[TMP69]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2187 // CHECK2:       omp_offload.failed:
2188 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111(i64 [[TMP60]]) #[[ATTR4]]
2189 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
2190 // CHECK2:       omp_offload.cont:
2191 // CHECK2-NEXT:    [[TMP70:%.*]] = load i32, i32* [[A]], align 4
2192 // CHECK2-NEXT:    [[CONV13:%.*]] = bitcast i64* [[A_CASTED12]] to i32*
2193 // CHECK2-NEXT:    store i32 [[TMP70]], i32* [[CONV13]], align 4
2194 // CHECK2-NEXT:    [[TMP71:%.*]] = load i64, i64* [[A_CASTED12]], align 8
2195 // CHECK2-NEXT:    [[TMP72:%.*]] = load i16, i16* [[AA]], align 2
2196 // CHECK2-NEXT:    [[CONV15:%.*]] = bitcast i64* [[AA_CASTED14]] to i16*
2197 // CHECK2-NEXT:    store i16 [[TMP72]], i16* [[CONV15]], align 2
2198 // CHECK2-NEXT:    [[TMP73:%.*]] = load i64, i64* [[AA_CASTED14]], align 8
2199 // CHECK2-NEXT:    [[TMP74:%.*]] = load i32, i32* [[N_ADDR]], align 4
2200 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP74]], 10
2201 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
2202 // CHECK2:       omp_if.then:
2203 // CHECK2-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
2204 // CHECK2-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
2205 // CHECK2-NEXT:    store i64 [[TMP71]], i64* [[TMP76]], align 8
2206 // CHECK2-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
2207 // CHECK2-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64*
2208 // CHECK2-NEXT:    store i64 [[TMP71]], i64* [[TMP78]], align 8
2209 // CHECK2-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0
2210 // CHECK2-NEXT:    store i8* null, i8** [[TMP79]], align 8
2211 // CHECK2-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1
2212 // CHECK2-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
2213 // CHECK2-NEXT:    store i64 [[TMP73]], i64* [[TMP81]], align 8
2214 // CHECK2-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1
2215 // CHECK2-NEXT:    [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64*
2216 // CHECK2-NEXT:    store i64 [[TMP73]], i64* [[TMP83]], align 8
2217 // CHECK2-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1
2218 // CHECK2-NEXT:    store i8* null, i8** [[TMP84]], align 8
2219 // CHECK2-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
2220 // CHECK2-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
2221 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
2222 // CHECK2-NEXT:    [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118.region_id, i32 2, i8** [[TMP85]], i8** [[TMP86]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.7, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
2223 // CHECK2-NEXT:    [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0
2224 // CHECK2-NEXT:    br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
2225 // CHECK2:       omp_offload.failed20:
2226 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i64 [[TMP71]], i64 [[TMP73]]) #[[ATTR4]]
2227 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT21]]
2228 // CHECK2:       omp_offload.cont21:
2229 // CHECK2-NEXT:    br label [[OMP_IF_END:%.*]]
2230 // CHECK2:       omp_if.else:
2231 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i64 [[TMP71]], i64 [[TMP73]]) #[[ATTR4]]
2232 // CHECK2-NEXT:    br label [[OMP_IF_END]]
2233 // CHECK2:       omp_if.end:
2234 // CHECK2-NEXT:    [[TMP89:%.*]] = load i32, i32* [[A]], align 4
2235 // CHECK2-NEXT:    [[CONV23:%.*]] = bitcast i64* [[A_CASTED22]] to i32*
2236 // CHECK2-NEXT:    store i32 [[TMP89]], i32* [[CONV23]], align 4
2237 // CHECK2-NEXT:    [[TMP90:%.*]] = load i64, i64* [[A_CASTED22]], align 8
2238 // CHECK2-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N_ADDR]], align 4
2239 // CHECK2-NEXT:    [[CMP24:%.*]] = icmp sgt i32 [[TMP91]], 20
2240 // CHECK2-NEXT:    br i1 [[CMP24]], label [[OMP_IF_THEN25:%.*]], label [[OMP_IF_ELSE32:%.*]]
2241 // CHECK2:       omp_if.then25:
2242 // CHECK2-NEXT:    [[TMP92:%.*]] = mul nuw i64 [[TMP2]], 4
2243 // CHECK2-NEXT:    [[TMP93:%.*]] = mul nuw i64 5, [[TMP5]]
2244 // CHECK2-NEXT:    [[TMP94:%.*]] = mul nuw i64 [[TMP93]], 8
2245 // CHECK2-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0
2246 // CHECK2-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i64*
2247 // CHECK2-NEXT:    store i64 [[TMP90]], i64* [[TMP96]], align 8
2248 // CHECK2-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0
2249 // CHECK2-NEXT:    [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64*
2250 // CHECK2-NEXT:    store i64 [[TMP90]], i64* [[TMP98]], align 8
2251 // CHECK2-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
2252 // CHECK2-NEXT:    store i64 4, i64* [[TMP99]], align 8
2253 // CHECK2-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 0
2254 // CHECK2-NEXT:    store i8* null, i8** [[TMP100]], align 8
2255 // CHECK2-NEXT:    [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 1
2256 // CHECK2-NEXT:    [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]**
2257 // CHECK2-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 8
2258 // CHECK2-NEXT:    [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 1
2259 // CHECK2-NEXT:    [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]**
2260 // CHECK2-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8
2261 // CHECK2-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
2262 // CHECK2-NEXT:    store i64 40, i64* [[TMP105]], align 8
2263 // CHECK2-NEXT:    [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 1
2264 // CHECK2-NEXT:    store i8* null, i8** [[TMP106]], align 8
2265 // CHECK2-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 2
2266 // CHECK2-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64*
2267 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[TMP108]], align 8
2268 // CHECK2-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 2
2269 // CHECK2-NEXT:    [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i64*
2270 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[TMP110]], align 8
2271 // CHECK2-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
2272 // CHECK2-NEXT:    store i64 8, i64* [[TMP111]], align 8
2273 // CHECK2-NEXT:    [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 2
2274 // CHECK2-NEXT:    store i8* null, i8** [[TMP112]], align 8
2275 // CHECK2-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 3
2276 // CHECK2-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float**
2277 // CHECK2-NEXT:    store float* [[VLA]], float** [[TMP114]], align 8
2278 // CHECK2-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 3
2279 // CHECK2-NEXT:    [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float**
2280 // CHECK2-NEXT:    store float* [[VLA]], float** [[TMP116]], align 8
2281 // CHECK2-NEXT:    [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
2282 // CHECK2-NEXT:    store i64 [[TMP92]], i64* [[TMP117]], align 8
2283 // CHECK2-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 3
2284 // CHECK2-NEXT:    store i8* null, i8** [[TMP118]], align 8
2285 // CHECK2-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 4
2286 // CHECK2-NEXT:    [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]**
2287 // CHECK2-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 8
2288 // CHECK2-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 4
2289 // CHECK2-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]**
2290 // CHECK2-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 8
2291 // CHECK2-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
2292 // CHECK2-NEXT:    store i64 400, i64* [[TMP123]], align 8
2293 // CHECK2-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 4
2294 // CHECK2-NEXT:    store i8* null, i8** [[TMP124]], align 8
2295 // CHECK2-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 5
2296 // CHECK2-NEXT:    [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64*
2297 // CHECK2-NEXT:    store i64 5, i64* [[TMP126]], align 8
2298 // CHECK2-NEXT:    [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 5
2299 // CHECK2-NEXT:    [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64*
2300 // CHECK2-NEXT:    store i64 5, i64* [[TMP128]], align 8
2301 // CHECK2-NEXT:    [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
2302 // CHECK2-NEXT:    store i64 8, i64* [[TMP129]], align 8
2303 // CHECK2-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 5
2304 // CHECK2-NEXT:    store i8* null, i8** [[TMP130]], align 8
2305 // CHECK2-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 6
2306 // CHECK2-NEXT:    [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64*
2307 // CHECK2-NEXT:    store i64 [[TMP5]], i64* [[TMP132]], align 8
2308 // CHECK2-NEXT:    [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 6
2309 // CHECK2-NEXT:    [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i64*
2310 // CHECK2-NEXT:    store i64 [[TMP5]], i64* [[TMP134]], align 8
2311 // CHECK2-NEXT:    [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
2312 // CHECK2-NEXT:    store i64 8, i64* [[TMP135]], align 8
2313 // CHECK2-NEXT:    [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 6
2314 // CHECK2-NEXT:    store i8* null, i8** [[TMP136]], align 8
2315 // CHECK2-NEXT:    [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 7
2316 // CHECK2-NEXT:    [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double**
2317 // CHECK2-NEXT:    store double* [[VLA1]], double** [[TMP138]], align 8
2318 // CHECK2-NEXT:    [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 7
2319 // CHECK2-NEXT:    [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double**
2320 // CHECK2-NEXT:    store double* [[VLA1]], double** [[TMP140]], align 8
2321 // CHECK2-NEXT:    [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
2322 // CHECK2-NEXT:    store i64 [[TMP94]], i64* [[TMP141]], align 8
2323 // CHECK2-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 7
2324 // CHECK2-NEXT:    store i8* null, i8** [[TMP142]], align 8
2325 // CHECK2-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 8
2326 // CHECK2-NEXT:    [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT**
2327 // CHECK2-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 8
2328 // CHECK2-NEXT:    [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 8
2329 // CHECK2-NEXT:    [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT**
2330 // CHECK2-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 8
2331 // CHECK2-NEXT:    [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
2332 // CHECK2-NEXT:    store i64 16, i64* [[TMP147]], align 8
2333 // CHECK2-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 8
2334 // CHECK2-NEXT:    store i8* null, i8** [[TMP148]], align 8
2335 // CHECK2-NEXT:    [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0
2336 // CHECK2-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0
2337 // CHECK2-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
2338 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
2339 // CHECK2-NEXT:    [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
2340 // CHECK2-NEXT:    [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0
2341 // CHECK2-NEXT:    br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
2342 // CHECK2:       omp_offload.failed30:
2343 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
2344 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
2345 // CHECK2:       omp_offload.cont31:
2346 // CHECK2-NEXT:    br label [[OMP_IF_END33:%.*]]
2347 // CHECK2:       omp_if.else32:
2348 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
2349 // CHECK2-NEXT:    br label [[OMP_IF_END33]]
2350 // CHECK2:       omp_if.end33:
2351 // CHECK2-NEXT:    [[TMP154:%.*]] = load i32, i32* [[A]], align 4
2352 // CHECK2-NEXT:    [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
2353 // CHECK2-NEXT:    call void @llvm.stackrestore(i8* [[TMP155]])
2354 // CHECK2-NEXT:    ret i32 [[TMP154]]
2355 //
2356 //
2357 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
2358 // CHECK2-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2:[0-9]+]] {
2359 // CHECK2-NEXT:  entry:
2360 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
2361 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
2362 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8
2363 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
2364 // CHECK2-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
2365 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
2366 // CHECK2-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
2367 // CHECK2-NEXT:    store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8
2368 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
2369 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
2370 // CHECK2-NEXT:    [[CONV4:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32*
2371 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV3]], align 4
2372 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV4]], align 4
2373 // CHECK2-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
2374 // CHECK2-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
2375 // CHECK2-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
2376 // CHECK2-NEXT:    store i16 [[TMP3]], i16* [[CONV5]], align 2
2377 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
2378 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP4]])
2379 // CHECK2-NEXT:    ret void
2380 //
2381 //
2382 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
2383 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3:[0-9]+]] {
2384 // CHECK2-NEXT:  entry:
2385 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2386 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2387 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
2388 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2389 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2390 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2391 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2392 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2393 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2394 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
2395 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2396 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2397 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
2398 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
2399 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2400 // CHECK2-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
2401 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2402 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2403 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2404 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2405 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2406 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2407 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
2408 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2409 // CHECK2:       cond.true:
2410 // CHECK2-NEXT:    br label [[COND_END:%.*]]
2411 // CHECK2:       cond.false:
2412 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2413 // CHECK2-NEXT:    br label [[COND_END]]
2414 // CHECK2:       cond.end:
2415 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
2416 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2417 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2418 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
2419 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2420 // CHECK2:       omp.inner.for.cond:
2421 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
2422 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
2423 // CHECK2-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
2424 // CHECK2-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2425 // CHECK2:       omp.inner.for.body:
2426 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
2427 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
2428 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2429 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10
2430 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2431 // CHECK2:       omp.body.continue:
2432 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2433 // CHECK2:       omp.inner.for.inc:
2434 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
2435 // CHECK2-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
2436 // CHECK2-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
2437 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
2438 // CHECK2:       omp.inner.for.end:
2439 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2440 // CHECK2:       omp.loop.exit:
2441 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
2442 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2443 // CHECK2-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
2444 // CHECK2-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2445 // CHECK2:       .omp.final.then:
2446 // CHECK2-NEXT:    store i32 10, i32* [[I]], align 4
2447 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2448 // CHECK2:       .omp.final.done:
2449 // CHECK2-NEXT:    ret void
2450 //
2451 //
2452 // CHECK2-LABEL: define {{[^@]+}}@.omp_task_privates_map.
2453 // CHECK2-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], i16** noalias noundef [[TMP1:%.*]], [3 x i8*]** noalias noundef [[TMP2:%.*]], [3 x i8*]** noalias noundef [[TMP3:%.*]], [3 x i64]** noalias noundef [[TMP4:%.*]]) #[[ATTR5:[0-9]+]] {
2454 // CHECK2-NEXT:  entry:
2455 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
2456 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i16**, align 8
2457 // CHECK2-NEXT:    [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 8
2458 // CHECK2-NEXT:    [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 8
2459 // CHECK2-NEXT:    [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 8
2460 // CHECK2-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
2461 // CHECK2-NEXT:    store i16** [[TMP1]], i16*** [[DOTADDR1]], align 8
2462 // CHECK2-NEXT:    store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 8
2463 // CHECK2-NEXT:    store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 8
2464 // CHECK2-NEXT:    store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 8
2465 // CHECK2-NEXT:    [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
2466 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0
2467 // CHECK2-NEXT:    [[TMP7:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 8
2468 // CHECK2-NEXT:    store [3 x i8*]* [[TMP6]], [3 x i8*]** [[TMP7]], align 8
2469 // CHECK2-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1
2470 // CHECK2-NEXT:    [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 8
2471 // CHECK2-NEXT:    store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 8
2472 // CHECK2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2
2473 // CHECK2-NEXT:    [[TMP11:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 8
2474 // CHECK2-NEXT:    store [3 x i64]* [[TMP10]], [3 x i64]** [[TMP11]], align 8
2475 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3
2476 // CHECK2-NEXT:    [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 8
2477 // CHECK2-NEXT:    store i16* [[TMP12]], i16** [[TMP13]], align 8
2478 // CHECK2-NEXT:    ret void
2479 //
2480 //
2481 // CHECK2-LABEL: define {{[^@]+}}@.omp_task_entry.
2482 // CHECK2-SAME: (i32 noundef signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR6:[0-9]+]] {
2483 // CHECK2-NEXT:  entry:
2484 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
2485 // CHECK2-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
2486 // CHECK2-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
2487 // CHECK2-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
2488 // CHECK2-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
2489 // CHECK2-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
2490 // CHECK2-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 8
2491 // CHECK2-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 8
2492 // CHECK2-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 8
2493 // CHECK2-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 8
2494 // CHECK2-NEXT:    [[AA_CASTED_I:%.*]] = alloca i64, align 8
2495 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__CASTED_I:%.*]] = alloca i64, align 8
2496 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__CASTED5_I:%.*]] = alloca i64, align 8
2497 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
2498 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
2499 // CHECK2-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
2500 // CHECK2-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
2501 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
2502 // CHECK2-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
2503 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
2504 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
2505 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
2506 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
2507 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
2508 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
2509 // CHECK2-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
2510 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
2511 // CHECK2-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
2512 // CHECK2-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
2513 // CHECK2-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
2514 // CHECK2-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]])
2515 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26
2516 // CHECK2-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !26
2517 // CHECK2-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26
2518 // CHECK2-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26
2519 // CHECK2-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !26
2520 // CHECK2-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !26
2521 // CHECK2-NEXT:    [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !26
2522 // CHECK2-NEXT:    [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26
2523 // CHECK2-NEXT:    [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26
2524 // CHECK2-NEXT:    [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)*
2525 // CHECK2-NEXT:    call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
2526 // CHECK2-NEXT:    [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !26
2527 // CHECK2-NEXT:    [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !26
2528 // CHECK2-NEXT:    [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !26
2529 // CHECK2-NEXT:    [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !26
2530 // CHECK2-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i64 0, i64 0
2531 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i64 0, i64 0
2532 // CHECK2-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i64 0, i64 0
2533 // CHECK2-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
2534 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
2535 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[TMP23]], align 4
2536 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) #[[ATTR4]]
2537 // CHECK2-NEXT:    [[TMP26:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 [[TMP25]], i32 1, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
2538 // CHECK2-NEXT:    [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
2539 // CHECK2-NEXT:    br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
2540 // CHECK2:       omp_offload.failed.i:
2541 // CHECK2-NEXT:    [[TMP28:%.*]] = load i16, i16* [[TMP16]], align 2
2542 // CHECK2-NEXT:    [[CONV_I:%.*]] = bitcast i64* [[AA_CASTED_I]] to i16*
2543 // CHECK2-NEXT:    store i16 [[TMP28]], i16* [[CONV_I]], align 2, !noalias !26
2544 // CHECK2-NEXT:    [[TMP29:%.*]] = load i64, i64* [[AA_CASTED_I]], align 8, !noalias !26
2545 // CHECK2-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP23]], align 4
2546 // CHECK2-NEXT:    [[CONV4_I:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED_I]] to i32*
2547 // CHECK2-NEXT:    store i32 [[TMP30]], i32* [[CONV4_I]], align 4, !noalias !26
2548 // CHECK2-NEXT:    [[TMP31:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED_I]], align 8, !noalias !26
2549 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP24]], align 4
2550 // CHECK2-NEXT:    [[CONV6_I:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED5_I]] to i32*
2551 // CHECK2-NEXT:    store i32 [[TMP32]], i32* [[CONV6_I]], align 4, !noalias !26
2552 // CHECK2-NEXT:    [[TMP33:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED5_I]], align 8, !noalias !26
2553 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97(i64 [[TMP29]], i64 [[TMP31]], i64 [[TMP33]]) #[[ATTR4]]
2554 // CHECK2-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
2555 // CHECK2:       .omp_outlined..1.exit:
2556 // CHECK2-NEXT:    ret i32 0
2557 //
2558 //
2559 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104
2560 // CHECK2-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
2561 // CHECK2-NEXT:  entry:
2562 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2563 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
2564 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2565 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2566 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
2567 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
2568 // CHECK2-NEXT:    store i32 [[TMP0]], i32* [[CONV1]], align 4
2569 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
2570 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]])
2571 // CHECK2-NEXT:    ret void
2572 //
2573 //
2574 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..2
2575 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
2576 // CHECK2-NEXT:  entry:
2577 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2578 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2579 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2580 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2581 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2582 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2583 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2584 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2585 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2586 // CHECK2-NEXT:    [[A1:%.*]] = alloca i32, align 4
2587 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2588 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2589 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2590 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2591 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2592 // CHECK2-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
2593 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2594 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2595 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2596 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2597 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2598 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2599 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
2600 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2601 // CHECK2:       cond.true:
2602 // CHECK2-NEXT:    br label [[COND_END:%.*]]
2603 // CHECK2:       cond.false:
2604 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2605 // CHECK2-NEXT:    br label [[COND_END]]
2606 // CHECK2:       cond.end:
2607 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
2608 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2609 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2610 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
2611 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2612 // CHECK2:       omp.inner.for.cond:
2613 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2614 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2615 // CHECK2-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
2616 // CHECK2-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2617 // CHECK2:       omp.inner.for.body:
2618 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2619 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
2620 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2621 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[A1]], align 4
2622 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A1]], align 4
2623 // CHECK2-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
2624 // CHECK2-NEXT:    store i32 [[ADD3]], i32* [[A1]], align 4
2625 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2626 // CHECK2:       omp.body.continue:
2627 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2628 // CHECK2:       omp.inner.for.inc:
2629 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2630 // CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1
2631 // CHECK2-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
2632 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
2633 // CHECK2:       omp.inner.for.end:
2634 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2635 // CHECK2:       omp.loop.exit:
2636 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
2637 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2638 // CHECK2-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
2639 // CHECK2-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2640 // CHECK2:       .omp.final.then:
2641 // CHECK2-NEXT:    store i32 10, i32* [[CONV]], align 4
2642 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2643 // CHECK2:       .omp.final.done:
2644 // CHECK2-NEXT:    ret void
2645 //
2646 //
2647 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
2648 // CHECK2-SAME: (i64 noundef [[AA:%.*]]) #[[ATTR2]] {
2649 // CHECK2-NEXT:  entry:
2650 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
2651 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
2652 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
2653 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
2654 // CHECK2-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
2655 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
2656 // CHECK2-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
2657 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
2658 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]])
2659 // CHECK2-NEXT:    ret void
2660 //
2661 //
2662 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
2663 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3]] {
2664 // CHECK2-NEXT:  entry:
2665 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2666 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2667 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
2668 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2669 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2670 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2671 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2672 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2673 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2674 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
2675 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2676 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2677 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
2678 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
2679 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2680 // CHECK2-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
2681 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2682 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2683 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2684 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2685 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2686 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2687 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
2688 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2689 // CHECK2:       cond.true:
2690 // CHECK2-NEXT:    br label [[COND_END:%.*]]
2691 // CHECK2:       cond.false:
2692 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2693 // CHECK2-NEXT:    br label [[COND_END]]
2694 // CHECK2:       cond.end:
2695 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
2696 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2697 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2698 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
2699 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2700 // CHECK2:       omp.inner.for.cond:
2701 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
2702 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29
2703 // CHECK2-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
2704 // CHECK2-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2705 // CHECK2:       omp.inner.for.body:
2706 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
2707 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
2708 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2709 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !29
2710 // CHECK2-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !29
2711 // CHECK2-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
2712 // CHECK2-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
2713 // CHECK2-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
2714 // CHECK2-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !29
2715 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2716 // CHECK2:       omp.body.continue:
2717 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2718 // CHECK2:       omp.inner.for.inc:
2719 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
2720 // CHECK2-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
2721 // CHECK2-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
2722 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
2723 // CHECK2:       omp.inner.for.end:
2724 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2725 // CHECK2:       omp.loop.exit:
2726 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
2727 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2728 // CHECK2-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
2729 // CHECK2-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2730 // CHECK2:       .omp.final.then:
2731 // CHECK2-NEXT:    store i32 10, i32* [[I]], align 4
2732 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2733 // CHECK2:       .omp.final.done:
2734 // CHECK2-NEXT:    ret void
2735 //
2736 //
2737 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
2738 // CHECK2-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR2]] {
2739 // CHECK2-NEXT:  entry:
2740 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2741 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
2742 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
2743 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
2744 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2745 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
2746 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2747 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
2748 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
2749 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
2750 // CHECK2-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
2751 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
2752 // CHECK2-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
2753 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
2754 // CHECK2-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
2755 // CHECK2-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
2756 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
2757 // CHECK2-NEXT:    ret void
2758 //
2759 //
2760 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..6
2761 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3]] {
2762 // CHECK2-NEXT:  entry:
2763 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2764 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2765 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2766 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
2767 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2768 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2769 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2770 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2771 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2772 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2773 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
2774 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2775 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2776 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2777 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
2778 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2779 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
2780 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2781 // CHECK2-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
2782 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2783 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2784 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2785 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2786 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2787 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2788 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
2789 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2790 // CHECK2:       cond.true:
2791 // CHECK2-NEXT:    br label [[COND_END:%.*]]
2792 // CHECK2:       cond.false:
2793 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2794 // CHECK2-NEXT:    br label [[COND_END]]
2795 // CHECK2:       cond.end:
2796 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
2797 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2798 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2799 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
2800 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2801 // CHECK2:       omp.inner.for.cond:
2802 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
2803 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
2804 // CHECK2-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
2805 // CHECK2-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2806 // CHECK2:       omp.inner.for.body:
2807 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
2808 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
2809 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2810 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !32
2811 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !32
2812 // CHECK2-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
2813 // CHECK2-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !32
2814 // CHECK2-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !32
2815 // CHECK2-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
2816 // CHECK2-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
2817 // CHECK2-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
2818 // CHECK2-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !32
2819 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2820 // CHECK2:       omp.body.continue:
2821 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2822 // CHECK2:       omp.inner.for.inc:
2823 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
2824 // CHECK2-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
2825 // CHECK2-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
2826 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
2827 // CHECK2:       omp.inner.for.end:
2828 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2829 // CHECK2:       omp.loop.exit:
2830 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
2831 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2832 // CHECK2-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
2833 // CHECK2-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2834 // CHECK2:       .omp.final.then:
2835 // CHECK2-NEXT:    store i32 10, i32* [[I]], align 4
2836 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2837 // CHECK2:       .omp.final.done:
2838 // CHECK2-NEXT:    ret void
2839 //
2840 //
2841 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
2842 // CHECK2-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR2]] {
2843 // CHECK2-NEXT:  entry:
2844 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2845 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
2846 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
2847 // CHECK2-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
2848 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
2849 // CHECK2-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
2850 // CHECK2-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
2851 // CHECK2-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
2852 // CHECK2-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
2853 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
2854 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2855 // CHECK2-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
2856 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
2857 // CHECK2-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
2858 // CHECK2-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
2859 // CHECK2-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
2860 // CHECK2-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
2861 // CHECK2-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
2862 // CHECK2-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
2863 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2864 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
2865 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
2866 // CHECK2-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
2867 // CHECK2-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
2868 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
2869 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
2870 // CHECK2-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
2871 // CHECK2-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
2872 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
2873 // CHECK2-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
2874 // CHECK2-NEXT:    store i32 [[TMP8]], i32* [[CONV5]], align 4
2875 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
2876 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
2877 // CHECK2-NEXT:    ret void
2878 //
2879 //
2880 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..9
2881 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR3]] {
2882 // CHECK2-NEXT:  entry:
2883 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2884 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2885 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2886 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
2887 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
2888 // CHECK2-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
2889 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
2890 // CHECK2-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
2891 // CHECK2-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
2892 // CHECK2-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
2893 // CHECK2-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
2894 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2895 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2896 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2897 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2898 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2899 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2900 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
2901 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2902 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2903 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2904 // CHECK2-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
2905 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
2906 // CHECK2-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
2907 // CHECK2-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
2908 // CHECK2-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
2909 // CHECK2-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
2910 // CHECK2-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
2911 // CHECK2-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
2912 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2913 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
2914 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
2915 // CHECK2-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
2916 // CHECK2-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
2917 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
2918 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
2919 // CHECK2-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
2920 // CHECK2-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
2921 // CHECK2-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 0
2922 // CHECK2-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
2923 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2924 // CHECK2-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
2925 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2926 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2927 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2928 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
2929 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2930 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2931 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
2932 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2933 // CHECK2:       cond.true:
2934 // CHECK2-NEXT:    br label [[COND_END:%.*]]
2935 // CHECK2:       cond.false:
2936 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2937 // CHECK2-NEXT:    br label [[COND_END]]
2938 // CHECK2:       cond.end:
2939 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
2940 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2941 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2942 // CHECK2-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
2943 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2944 // CHECK2:       omp.inner.for.cond:
2945 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
2946 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
2947 // CHECK2-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
2948 // CHECK2-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2949 // CHECK2:       omp.inner.for.body:
2950 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
2951 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
2952 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2953 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !35
2954 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !35
2955 // CHECK2-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
2956 // CHECK2-NEXT:    store i32 [[ADD6]], i32* [[CONV]], align 4, !llvm.access.group !35
2957 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
2958 // CHECK2-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !35
2959 // CHECK2-NEXT:    [[CONV7:%.*]] = fpext float [[TMP17]] to double
2960 // CHECK2-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
2961 // CHECK2-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
2962 // CHECK2-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !35
2963 // CHECK2-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
2964 // CHECK2-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !35
2965 // CHECK2-NEXT:    [[CONV11:%.*]] = fpext float [[TMP18]] to double
2966 // CHECK2-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
2967 // CHECK2-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
2968 // CHECK2-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !35
2969 // CHECK2-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
2970 // CHECK2-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i64 0, i64 2
2971 // CHECK2-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !35
2972 // CHECK2-NEXT:    [[ADD16:%.*]] = fadd double [[TMP19]], 1.000000e+00
2973 // CHECK2-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !35
2974 // CHECK2-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP5]]
2975 // CHECK2-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP20]]
2976 // CHECK2-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i64 3
2977 // CHECK2-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !35
2978 // CHECK2-NEXT:    [[ADD19:%.*]] = fadd double [[TMP21]], 1.000000e+00
2979 // CHECK2-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !35
2980 // CHECK2-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
2981 // CHECK2-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !35
2982 // CHECK2-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP22]], 1
2983 // CHECK2-NEXT:    store i64 [[ADD20]], i64* [[X]], align 8, !llvm.access.group !35
2984 // CHECK2-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
2985 // CHECK2-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !35
2986 // CHECK2-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP23]] to i32
2987 // CHECK2-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
2988 // CHECK2-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
2989 // CHECK2-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 8, !llvm.access.group !35
2990 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2991 // CHECK2:       omp.body.continue:
2992 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2993 // CHECK2:       omp.inner.for.inc:
2994 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
2995 // CHECK2-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP24]], 1
2996 // CHECK2-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
2997 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
2998 // CHECK2:       omp.inner.for.end:
2999 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3000 // CHECK2:       omp.loop.exit:
3001 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
3002 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3003 // CHECK2-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
3004 // CHECK2-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3005 // CHECK2:       .omp.final.then:
3006 // CHECK2-NEXT:    store i32 10, i32* [[I]], align 4
3007 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3008 // CHECK2:       .omp.final.done:
3009 // CHECK2-NEXT:    ret void
3010 //
3011 //
3012 // CHECK2-LABEL: define {{[^@]+}}@_Z3bari
3013 // CHECK2-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
3014 // CHECK2-NEXT:  entry:
3015 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3016 // CHECK2-NEXT:    [[A:%.*]] = alloca i32, align 4
3017 // CHECK2-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
3018 // CHECK2-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3019 // CHECK2-NEXT:    store i32 0, i32* [[A]], align 4
3020 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
3021 // CHECK2-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
3022 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
3023 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
3024 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
3025 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
3026 // CHECK2-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef signext [[TMP2]])
3027 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
3028 // CHECK2-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
3029 // CHECK2-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
3030 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
3031 // CHECK2-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
3032 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
3033 // CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
3034 // CHECK2-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
3035 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
3036 // CHECK2-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
3037 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
3038 // CHECK2-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
3039 // CHECK2-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
3040 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
3041 // CHECK2-NEXT:    ret i32 [[TMP8]]
3042 //
3043 //
3044 // CHECK2-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
3045 // CHECK2-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
3046 // CHECK2-NEXT:  entry:
3047 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
3048 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3049 // CHECK2-NEXT:    [[B:%.*]] = alloca i32, align 4
3050 // CHECK2-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
3051 // CHECK2-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
3052 // CHECK2-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
3053 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
3054 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
3055 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
3056 // CHECK2-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
3057 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3058 // CHECK2-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
3059 // CHECK2-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3060 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
3061 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
3062 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
3063 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
3064 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
3065 // CHECK2-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
3066 // CHECK2-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
3067 // CHECK2-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
3068 // CHECK2-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
3069 // CHECK2-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
3070 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
3071 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B]], align 4
3072 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
3073 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[CONV]], align 4
3074 // CHECK2-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
3075 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4
3076 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60
3077 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
3078 // CHECK2:       omp_if.then:
3079 // CHECK2-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
3080 // CHECK2-NEXT:    [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]]
3081 // CHECK2-NEXT:    [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
3082 // CHECK2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3083 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
3084 // CHECK2-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 8
3085 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3086 // CHECK2-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
3087 // CHECK2-NEXT:    store double* [[A]], double** [[TMP13]], align 8
3088 // CHECK2-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
3089 // CHECK2-NEXT:    store i64 8, i64* [[TMP14]], align 8
3090 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
3091 // CHECK2-NEXT:    store i8* null, i8** [[TMP15]], align 8
3092 // CHECK2-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
3093 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
3094 // CHECK2-NEXT:    store i64 [[TMP6]], i64* [[TMP17]], align 8
3095 // CHECK2-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
3096 // CHECK2-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64*
3097 // CHECK2-NEXT:    store i64 [[TMP6]], i64* [[TMP19]], align 8
3098 // CHECK2-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
3099 // CHECK2-NEXT:    store i64 4, i64* [[TMP20]], align 8
3100 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
3101 // CHECK2-NEXT:    store i8* null, i8** [[TMP21]], align 8
3102 // CHECK2-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
3103 // CHECK2-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64*
3104 // CHECK2-NEXT:    store i64 2, i64* [[TMP23]], align 8
3105 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
3106 // CHECK2-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
3107 // CHECK2-NEXT:    store i64 2, i64* [[TMP25]], align 8
3108 // CHECK2-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
3109 // CHECK2-NEXT:    store i64 8, i64* [[TMP26]], align 8
3110 // CHECK2-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
3111 // CHECK2-NEXT:    store i8* null, i8** [[TMP27]], align 8
3112 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
3113 // CHECK2-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
3114 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[TMP29]], align 8
3115 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
3116 // CHECK2-NEXT:    [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64*
3117 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[TMP31]], align 8
3118 // CHECK2-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
3119 // CHECK2-NEXT:    store i64 8, i64* [[TMP32]], align 8
3120 // CHECK2-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
3121 // CHECK2-NEXT:    store i8* null, i8** [[TMP33]], align 8
3122 // CHECK2-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
3123 // CHECK2-NEXT:    [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
3124 // CHECK2-NEXT:    store i16* [[VLA]], i16** [[TMP35]], align 8
3125 // CHECK2-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
3126 // CHECK2-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
3127 // CHECK2-NEXT:    store i16* [[VLA]], i16** [[TMP37]], align 8
3128 // CHECK2-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
3129 // CHECK2-NEXT:    store i64 [[TMP9]], i64* [[TMP38]], align 8
3130 // CHECK2-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
3131 // CHECK2-NEXT:    store i8* null, i8** [[TMP39]], align 8
3132 // CHECK2-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3133 // CHECK2-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3134 // CHECK2-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
3135 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
3136 // CHECK2-NEXT:    [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
3137 // CHECK2-NEXT:    [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
3138 // CHECK2-NEXT:    br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3139 // CHECK2:       omp_offload.failed:
3140 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]]
3141 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3142 // CHECK2:       omp_offload.cont:
3143 // CHECK2-NEXT:    br label [[OMP_IF_END:%.*]]
3144 // CHECK2:       omp_if.else:
3145 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]]
3146 // CHECK2-NEXT:    br label [[OMP_IF_END]]
3147 // CHECK2:       omp_if.end:
3148 // CHECK2-NEXT:    [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]]
3149 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]]
3150 // CHECK2-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
3151 // CHECK2-NEXT:    [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
3152 // CHECK2-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP46]] to i32
3153 // CHECK2-NEXT:    [[TMP47:%.*]] = load i32, i32* [[B]], align 4
3154 // CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]]
3155 // CHECK2-NEXT:    [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
3156 // CHECK2-NEXT:    call void @llvm.stackrestore(i8* [[TMP48]])
3157 // CHECK2-NEXT:    ret i32 [[ADD4]]
3158 //
3159 //
3160 // CHECK2-LABEL: define {{[^@]+}}@_ZL7fstatici
3161 // CHECK2-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
3162 // CHECK2-NEXT:  entry:
3163 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3164 // CHECK2-NEXT:    [[A:%.*]] = alloca i32, align 4
3165 // CHECK2-NEXT:    [[AA:%.*]] = alloca i16, align 2
3166 // CHECK2-NEXT:    [[AAA:%.*]] = alloca i8, align 1
3167 // CHECK2-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
3168 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
3169 // CHECK2-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
3170 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
3171 // CHECK2-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
3172 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
3173 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
3174 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
3175 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3176 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3177 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
3178 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
3179 // CHECK2-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3180 // CHECK2-NEXT:    store i32 0, i32* [[A]], align 4
3181 // CHECK2-NEXT:    store i16 0, i16* [[AA]], align 2
3182 // CHECK2-NEXT:    store i8 0, i8* [[AAA]], align 1
3183 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
3184 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
3185 // CHECK2-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
3186 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
3187 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
3188 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
3189 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
3190 // CHECK2-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
3191 // CHECK2-NEXT:    [[TMP4:%.*]] = load i16, i16* [[AA]], align 2
3192 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
3193 // CHECK2-NEXT:    store i16 [[TMP4]], i16* [[CONV2]], align 2
3194 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[AA_CASTED]], align 8
3195 // CHECK2-NEXT:    [[TMP6:%.*]] = load i8, i8* [[AAA]], align 1
3196 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
3197 // CHECK2-NEXT:    store i8 [[TMP6]], i8* [[CONV3]], align 1
3198 // CHECK2-NEXT:    [[TMP7:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
3199 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[N_ADDR]], align 4
3200 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 50
3201 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
3202 // CHECK2:       omp_if.then:
3203 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3204 // CHECK2-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
3205 // CHECK2-NEXT:    store i64 [[TMP1]], i64* [[TMP10]], align 8
3206 // CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3207 // CHECK2-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64*
3208 // CHECK2-NEXT:    store i64 [[TMP1]], i64* [[TMP12]], align 8
3209 // CHECK2-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
3210 // CHECK2-NEXT:    store i8* null, i8** [[TMP13]], align 8
3211 // CHECK2-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
3212 // CHECK2-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
3213 // CHECK2-NEXT:    store i64 [[TMP3]], i64* [[TMP15]], align 8
3214 // CHECK2-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
3215 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
3216 // CHECK2-NEXT:    store i64 [[TMP3]], i64* [[TMP17]], align 8
3217 // CHECK2-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
3218 // CHECK2-NEXT:    store i8* null, i8** [[TMP18]], align 8
3219 // CHECK2-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
3220 // CHECK2-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
3221 // CHECK2-NEXT:    store i64 [[TMP5]], i64* [[TMP20]], align 8
3222 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
3223 // CHECK2-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
3224 // CHECK2-NEXT:    store i64 [[TMP5]], i64* [[TMP22]], align 8
3225 // CHECK2-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
3226 // CHECK2-NEXT:    store i8* null, i8** [[TMP23]], align 8
3227 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
3228 // CHECK2-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
3229 // CHECK2-NEXT:    store i64 [[TMP7]], i64* [[TMP25]], align 8
3230 // CHECK2-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
3231 // CHECK2-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
3232 // CHECK2-NEXT:    store i64 [[TMP7]], i64* [[TMP27]], align 8
3233 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
3234 // CHECK2-NEXT:    store i8* null, i8** [[TMP28]], align 8
3235 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
3236 // CHECK2-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [10 x i32]**
3237 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP30]], align 8
3238 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
3239 // CHECK2-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to [10 x i32]**
3240 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP32]], align 8
3241 // CHECK2-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
3242 // CHECK2-NEXT:    store i8* null, i8** [[TMP33]], align 8
3243 // CHECK2-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3244 // CHECK2-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3245 // CHECK2-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A]], align 4
3246 // CHECK2-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_]], align 4
3247 // CHECK2-NEXT:    [[TMP37:%.*]] = load i32, i32* [[N_ADDR]], align 4
3248 // CHECK2-NEXT:    store i32 [[TMP37]], i32* [[DOTCAPTURE_EXPR_4]], align 4
3249 // CHECK2-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
3250 // CHECK2-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3251 // CHECK2-NEXT:    [[SUB:%.*]] = sub i32 [[TMP38]], [[TMP39]]
3252 // CHECK2-NEXT:    [[SUB6:%.*]] = sub i32 [[SUB]], 1
3253 // CHECK2-NEXT:    [[ADD:%.*]] = add i32 [[SUB6]], 1
3254 // CHECK2-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
3255 // CHECK2-NEXT:    [[SUB7:%.*]] = sub i32 [[DIV]], 1
3256 // CHECK2-NEXT:    store i32 [[SUB7]], i32* [[DOTCAPTURE_EXPR_5]], align 4
3257 // CHECK2-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
3258 // CHECK2-NEXT:    [[ADD8:%.*]] = add i32 [[TMP40]], 1
3259 // CHECK2-NEXT:    [[TMP41:%.*]] = zext i32 [[ADD8]] to i64
3260 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]])
3261 // CHECK2-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
3262 // CHECK2-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
3263 // CHECK2-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3264 // CHECK2:       omp_offload.failed:
3265 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], i64 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
3266 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3267 // CHECK2:       omp_offload.cont:
3268 // CHECK2-NEXT:    br label [[OMP_IF_END:%.*]]
3269 // CHECK2:       omp_if.else:
3270 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], i64 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
3271 // CHECK2-NEXT:    br label [[OMP_IF_END]]
3272 // CHECK2:       omp_if.end:
3273 // CHECK2-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
3274 // CHECK2-NEXT:    ret i32 [[TMP44]]
3275 //
3276 //
3277 // CHECK2-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
3278 // CHECK2-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
3279 // CHECK2-NEXT:  entry:
3280 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3281 // CHECK2-NEXT:    [[A:%.*]] = alloca i32, align 4
3282 // CHECK2-NEXT:    [[AA:%.*]] = alloca i16, align 2
3283 // CHECK2-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
3284 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
3285 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
3286 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
3287 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
3288 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
3289 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3290 // CHECK2-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3291 // CHECK2-NEXT:    store i32 0, i32* [[A]], align 4
3292 // CHECK2-NEXT:    store i16 0, i16* [[AA]], align 2
3293 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
3294 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
3295 // CHECK2-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
3296 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
3297 // CHECK2-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
3298 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
3299 // CHECK2-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
3300 // CHECK2-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
3301 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
3302 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
3303 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
3304 // CHECK2:       omp_if.then:
3305 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3306 // CHECK2-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
3307 // CHECK2-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
3308 // CHECK2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3309 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
3310 // CHECK2-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
3311 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
3312 // CHECK2-NEXT:    store i8* null, i8** [[TMP9]], align 8
3313 // CHECK2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
3314 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
3315 // CHECK2-NEXT:    store i64 [[TMP3]], i64* [[TMP11]], align 8
3316 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
3317 // CHECK2-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
3318 // CHECK2-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
3319 // CHECK2-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
3320 // CHECK2-NEXT:    store i8* null, i8** [[TMP14]], align 8
3321 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
3322 // CHECK2-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
3323 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
3324 // CHECK2-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
3325 // CHECK2-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
3326 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
3327 // CHECK2-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
3328 // CHECK2-NEXT:    store i8* null, i8** [[TMP19]], align 8
3329 // CHECK2-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3330 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3331 // CHECK2-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
3332 // CHECK2-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
3333 // CHECK2-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
3334 // CHECK2-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3335 // CHECK2:       omp_offload.failed:
3336 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
3337 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3338 // CHECK2:       omp_offload.cont:
3339 // CHECK2-NEXT:    br label [[OMP_IF_END:%.*]]
3340 // CHECK2:       omp_if.else:
3341 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
3342 // CHECK2-NEXT:    br label [[OMP_IF_END]]
3343 // CHECK2:       omp_if.end:
3344 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
3345 // CHECK2-NEXT:    ret i32 [[TMP24]]
3346 //
3347 //
3348 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
3349 // CHECK2-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
3350 // CHECK2-NEXT:  entry:
3351 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
3352 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
3353 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
3354 // CHECK2-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
3355 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
3356 // CHECK2-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
3357 // CHECK2-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
3358 // CHECK2-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
3359 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
3360 // CHECK2-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
3361 // CHECK2-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
3362 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
3363 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
3364 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
3365 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
3366 // CHECK2-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
3367 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
3368 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
3369 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
3370 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
3371 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
3372 // CHECK2-NEXT:    ret void
3373 //
3374 //
3375 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11
3376 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
3377 // CHECK2-NEXT:  entry:
3378 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3379 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3380 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
3381 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
3382 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
3383 // CHECK2-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
3384 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
3385 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3386 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3387 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3388 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3389 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3390 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3391 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3392 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3393 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3394 // CHECK2-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
3395 // CHECK2-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
3396 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
3397 // CHECK2-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
3398 // CHECK2-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
3399 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
3400 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
3401 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
3402 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
3403 // CHECK2-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
3404 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3405 // CHECK2-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
3406 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3407 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3408 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3409 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
3410 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3411 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3412 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
3413 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3414 // CHECK2:       cond.true:
3415 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3416 // CHECK2:       cond.false:
3417 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3418 // CHECK2-NEXT:    br label [[COND_END]]
3419 // CHECK2:       cond.end:
3420 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
3421 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3422 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3423 // CHECK2-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
3424 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3425 // CHECK2:       omp.inner.for.cond:
3426 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
3427 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !38
3428 // CHECK2-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
3429 // CHECK2-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3430 // CHECK2:       omp.inner.for.body:
3431 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
3432 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
3433 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3434 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !38
3435 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !38
3436 // CHECK2-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double
3437 // CHECK2-NEXT:    [[ADD5:%.*]] = fadd double [[CONV4]], 1.500000e+00
3438 // CHECK2-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
3439 // CHECK2-NEXT:    store double [[ADD5]], double* [[A]], align 8, !llvm.access.group !38
3440 // CHECK2-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
3441 // CHECK2-NEXT:    [[TMP13:%.*]] = load double, double* [[A6]], align 8, !llvm.access.group !38
3442 // CHECK2-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
3443 // CHECK2-NEXT:    store double [[INC]], double* [[A6]], align 8, !llvm.access.group !38
3444 // CHECK2-NEXT:    [[CONV7:%.*]] = fptosi double [[INC]] to i16
3445 // CHECK2-NEXT:    [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]]
3446 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]]
3447 // CHECK2-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
3448 // CHECK2-NEXT:    store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !38
3449 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3450 // CHECK2:       omp.body.continue:
3451 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3452 // CHECK2:       omp.inner.for.inc:
3453 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
3454 // CHECK2-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP15]], 1
3455 // CHECK2-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
3456 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
3457 // CHECK2:       omp.inner.for.end:
3458 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3459 // CHECK2:       omp.loop.exit:
3460 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
3461 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3462 // CHECK2-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
3463 // CHECK2-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3464 // CHECK2:       .omp.final.then:
3465 // CHECK2-NEXT:    store i32 10, i32* [[I]], align 4
3466 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3467 // CHECK2:       .omp.final.done:
3468 // CHECK2-NEXT:    ret void
3469 //
3470 //
3471 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
3472 // CHECK2-SAME: (i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
3473 // CHECK2-NEXT:  entry:
3474 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
3475 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
3476 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
3477 // CHECK2-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
3478 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
3479 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
3480 // CHECK2-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
3481 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
3482 // CHECK2-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
3483 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
3484 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
3485 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
3486 // CHECK2-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
3487 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
3488 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
3489 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
3490 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
3491 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
3492 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
3493 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
3494 // CHECK2-NEXT:    [[CONV4:%.*]] = bitcast i64* [[A_CASTED]] to i32*
3495 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[CONV4]], align 4
3496 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
3497 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV1]], align 4
3498 // CHECK2-NEXT:    [[CONV5:%.*]] = bitcast i64* [[N_CASTED]] to i32*
3499 // CHECK2-NEXT:    store i32 [[TMP3]], i32* [[CONV5]], align 4
3500 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
3501 // CHECK2-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV2]], align 2
3502 // CHECK2-NEXT:    [[CONV6:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
3503 // CHECK2-NEXT:    store i16 [[TMP5]], i16* [[CONV6]], align 2
3504 // CHECK2-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AA_CASTED]], align 8
3505 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 1
3506 // CHECK2-NEXT:    [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
3507 // CHECK2-NEXT:    store i8 [[TMP7]], i8* [[CONV7]], align 1
3508 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
3509 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]])
3510 // CHECK2-NEXT:    ret void
3511 //
3512 //
3513 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..13
3514 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
3515 // CHECK2-NEXT:  entry:
3516 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3517 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3518 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
3519 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
3520 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
3521 // CHECK2-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
3522 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
3523 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3524 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3525 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3526 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
3527 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
3528 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3529 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3530 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3531 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3532 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3533 // CHECK2-NEXT:    [[I8:%.*]] = alloca i32, align 4
3534 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3535 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3536 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
3537 // CHECK2-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
3538 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
3539 // CHECK2-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
3540 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
3541 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
3542 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
3543 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
3544 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
3545 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
3546 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
3547 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
3548 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
3549 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_4]], align 4
3550 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
3551 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3552 // CHECK2-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
3553 // CHECK2-NEXT:    [[SUB6:%.*]] = sub i32 [[SUB]], 1
3554 // CHECK2-NEXT:    [[ADD:%.*]] = add i32 [[SUB6]], 1
3555 // CHECK2-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
3556 // CHECK2-NEXT:    [[SUB7:%.*]] = sub i32 [[DIV]], 1
3557 // CHECK2-NEXT:    store i32 [[SUB7]], i32* [[DOTCAPTURE_EXPR_5]], align 4
3558 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3559 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
3560 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3561 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
3562 // CHECK2-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
3563 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3564 // CHECK2:       omp.precond.then:
3565 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3566 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
3567 // CHECK2-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
3568 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3569 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3570 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3571 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
3572 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3573 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3574 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
3575 // CHECK2-NEXT:    [[CMP9:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
3576 // CHECK2-NEXT:    br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3577 // CHECK2:       cond.true:
3578 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
3579 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3580 // CHECK2:       cond.false:
3581 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3582 // CHECK2-NEXT:    br label [[COND_END]]
3583 // CHECK2:       cond.end:
3584 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
3585 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3586 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3587 // CHECK2-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
3588 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3589 // CHECK2:       omp.inner.for.cond:
3590 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
3591 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41
3592 // CHECK2-NEXT:    [[ADD10:%.*]] = add i32 [[TMP17]], 1
3593 // CHECK2-NEXT:    [[CMP11:%.*]] = icmp ult i32 [[TMP16]], [[ADD10]]
3594 // CHECK2-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3595 // CHECK2:       omp.inner.for.body:
3596 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !41
3597 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
3598 // CHECK2-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
3599 // CHECK2-NEXT:    [[ADD12:%.*]] = add i32 [[TMP18]], [[MUL]]
3600 // CHECK2-NEXT:    store i32 [[ADD12]], i32* [[I8]], align 4, !llvm.access.group !41
3601 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !41
3602 // CHECK2-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP20]], 1
3603 // CHECK2-NEXT:    store i32 [[ADD13]], i32* [[CONV]], align 4, !llvm.access.group !41
3604 // CHECK2-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV2]], align 2, !llvm.access.group !41
3605 // CHECK2-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP21]] to i32
3606 // CHECK2-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
3607 // CHECK2-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
3608 // CHECK2-NEXT:    store i16 [[CONV16]], i16* [[CONV2]], align 2, !llvm.access.group !41
3609 // CHECK2-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV3]], align 1, !llvm.access.group !41
3610 // CHECK2-NEXT:    [[CONV17:%.*]] = sext i8 [[TMP22]] to i32
3611 // CHECK2-NEXT:    [[ADD18:%.*]] = add nsw i32 [[CONV17]], 1
3612 // CHECK2-NEXT:    [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
3613 // CHECK2-NEXT:    store i8 [[CONV19]], i8* [[CONV3]], align 1, !llvm.access.group !41
3614 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
3615 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !41
3616 // CHECK2-NEXT:    [[ADD20:%.*]] = add nsw i32 [[TMP23]], 1
3617 // CHECK2-NEXT:    store i32 [[ADD20]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !41
3618 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3619 // CHECK2:       omp.body.continue:
3620 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3621 // CHECK2:       omp.inner.for.inc:
3622 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
3623 // CHECK2-NEXT:    [[ADD21:%.*]] = add i32 [[TMP24]], 1
3624 // CHECK2-NEXT:    store i32 [[ADD21]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
3625 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
3626 // CHECK2:       omp.inner.for.end:
3627 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3628 // CHECK2:       omp.loop.exit:
3629 // CHECK2-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3630 // CHECK2-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
3631 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
3632 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3633 // CHECK2-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
3634 // CHECK2-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3635 // CHECK2:       .omp.final.then:
3636 // CHECK2-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3637 // CHECK2-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
3638 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3639 // CHECK2-NEXT:    [[SUB22:%.*]] = sub i32 [[TMP30]], [[TMP31]]
3640 // CHECK2-NEXT:    [[SUB23:%.*]] = sub i32 [[SUB22]], 1
3641 // CHECK2-NEXT:    [[ADD24:%.*]] = add i32 [[SUB23]], 1
3642 // CHECK2-NEXT:    [[DIV25:%.*]] = udiv i32 [[ADD24]], 1
3643 // CHECK2-NEXT:    [[MUL26:%.*]] = mul i32 [[DIV25]], 1
3644 // CHECK2-NEXT:    [[ADD27:%.*]] = add i32 [[TMP29]], [[MUL26]]
3645 // CHECK2-NEXT:    store i32 [[ADD27]], i32* [[I8]], align 4
3646 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3647 // CHECK2:       .omp.final.done:
3648 // CHECK2-NEXT:    br label [[OMP_PRECOND_END]]
3649 // CHECK2:       omp.precond.end:
3650 // CHECK2-NEXT:    ret void
3651 //
3652 //
3653 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
3654 // CHECK2-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
3655 // CHECK2-NEXT:  entry:
3656 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
3657 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
3658 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
3659 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
3660 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
3661 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
3662 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
3663 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
3664 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
3665 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
3666 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
3667 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
3668 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
3669 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
3670 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
3671 // CHECK2-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
3672 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
3673 // CHECK2-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
3674 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
3675 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
3676 // CHECK2-NEXT:    ret void
3677 //
3678 //
3679 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..16
3680 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
3681 // CHECK2-NEXT:  entry:
3682 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3683 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3684 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
3685 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
3686 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
3687 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3688 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3689 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3690 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3691 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3692 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3693 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
3694 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3695 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3696 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
3697 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
3698 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
3699 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
3700 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
3701 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
3702 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3703 // CHECK2-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
3704 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3705 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3706 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3707 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
3708 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3709 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3710 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
3711 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3712 // CHECK2:       cond.true:
3713 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3714 // CHECK2:       cond.false:
3715 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3716 // CHECK2-NEXT:    br label [[COND_END]]
3717 // CHECK2:       cond.end:
3718 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3719 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3720 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3721 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
3722 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3723 // CHECK2:       omp.inner.for.cond:
3724 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
3725 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !44
3726 // CHECK2-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3727 // CHECK2-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3728 // CHECK2:       omp.inner.for.body:
3729 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
3730 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
3731 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3732 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !44
3733 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !44
3734 // CHECK2-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
3735 // CHECK2-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !44
3736 // CHECK2-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !44
3737 // CHECK2-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
3738 // CHECK2-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
3739 // CHECK2-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
3740 // CHECK2-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !44
3741 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
3742 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
3743 // CHECK2-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
3744 // CHECK2-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
3745 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3746 // CHECK2:       omp.body.continue:
3747 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3748 // CHECK2:       omp.inner.for.inc:
3749 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
3750 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP12]], 1
3751 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
3752 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
3753 // CHECK2:       omp.inner.for.end:
3754 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3755 // CHECK2:       omp.loop.exit:
3756 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
3757 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3758 // CHECK2-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
3759 // CHECK2-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3760 // CHECK2:       .omp.final.then:
3761 // CHECK2-NEXT:    store i32 10, i32* [[I]], align 4
3762 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3763 // CHECK2:       .omp.final.done:
3764 // CHECK2-NEXT:    ret void
3765 //
3766 //
3767 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
3768 // CHECK2-SAME: () #[[ATTR5]] {
3769 // CHECK2-NEXT:  entry:
3770 // CHECK2-NEXT:    call void @__tgt_register_requires(i64 1)
3771 // CHECK2-NEXT:    ret void
3772 //
3773 //
3774 // CHECK3-LABEL: define {{[^@]+}}@_Z3fooi
3775 // CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
3776 // CHECK3-NEXT:  entry:
3777 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3778 // CHECK3-NEXT:    [[A:%.*]] = alloca i32, align 4
3779 // CHECK3-NEXT:    [[AA:%.*]] = alloca i16, align 2
3780 // CHECK3-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
3781 // CHECK3-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
3782 // CHECK3-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
3783 // CHECK3-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
3784 // CHECK3-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
3785 // CHECK3-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
3786 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3787 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
3788 // CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
3789 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
3790 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED3:%.*]] = alloca i32, align 4
3791 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
3792 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
3793 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
3794 // CHECK3-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
3795 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
3796 // CHECK3-NEXT:    [[AA_CASTED4:%.*]] = alloca i32, align 4
3797 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [1 x i8*], align 4
3798 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS7:%.*]] = alloca [1 x i8*], align 4
3799 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [1 x i8*], align 4
3800 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3801 // CHECK3-NEXT:    [[A_CASTED9:%.*]] = alloca i32, align 4
3802 // CHECK3-NEXT:    [[AA_CASTED10:%.*]] = alloca i32, align 4
3803 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS12:%.*]] = alloca [2 x i8*], align 4
3804 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS13:%.*]] = alloca [2 x i8*], align 4
3805 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS14:%.*]] = alloca [2 x i8*], align 4
3806 // CHECK3-NEXT:    [[_TMP15:%.*]] = alloca i32, align 4
3807 // CHECK3-NEXT:    [[A_CASTED18:%.*]] = alloca i32, align 4
3808 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [9 x i8*], align 4
3809 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS22:%.*]] = alloca [9 x i8*], align 4
3810 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [9 x i8*], align 4
3811 // CHECK3-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4
3812 // CHECK3-NEXT:    [[_TMP24:%.*]] = alloca i32, align 4
3813 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
3814 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3815 // CHECK3-NEXT:    store i32 0, i32* [[A]], align 4
3816 // CHECK3-NEXT:    store i16 0, i16* [[AA]], align 2
3817 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
3818 // CHECK3-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
3819 // CHECK3-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
3820 // CHECK3-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
3821 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
3822 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
3823 // CHECK3-NEXT:    [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
3824 // CHECK3-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
3825 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
3826 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
3827 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
3828 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[A]], align 4
3829 // CHECK3-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_2]], align 4
3830 // CHECK3-NEXT:    [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
3831 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
3832 // CHECK3-NEXT:    store i16 [[TMP7]], i16* [[CONV]], align 2
3833 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[AA_CASTED]], align 4
3834 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3835 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
3836 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
3837 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3838 // CHECK3-NEXT:    store i32 [[TMP11]], i32* [[DOTCAPTURE_EXPR__CASTED3]], align 4
3839 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED3]], align 4
3840 // CHECK3-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3841 // CHECK3-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32*
3842 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[TMP14]], align 4
3843 // CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3844 // CHECK3-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32*
3845 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[TMP16]], align 4
3846 // CHECK3-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
3847 // CHECK3-NEXT:    store i8* null, i8** [[TMP17]], align 4
3848 // CHECK3-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
3849 // CHECK3-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
3850 // CHECK3-NEXT:    store i32 [[TMP10]], i32* [[TMP19]], align 4
3851 // CHECK3-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
3852 // CHECK3-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32*
3853 // CHECK3-NEXT:    store i32 [[TMP10]], i32* [[TMP21]], align 4
3854 // CHECK3-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
3855 // CHECK3-NEXT:    store i8* null, i8** [[TMP22]], align 4
3856 // CHECK3-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
3857 // CHECK3-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32*
3858 // CHECK3-NEXT:    store i32 [[TMP12]], i32* [[TMP24]], align 4
3859 // CHECK3-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
3860 // CHECK3-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32*
3861 // CHECK3-NEXT:    store i32 [[TMP12]], i32* [[TMP26]], align 4
3862 // CHECK3-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
3863 // CHECK3-NEXT:    store i8* null, i8** [[TMP27]], align 4
3864 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3865 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3866 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
3867 // CHECK3-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2
3868 // CHECK3-NEXT:    store i16 [[TMP31]], i16* [[TMP30]], align 4
3869 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
3870 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3871 // CHECK3-NEXT:    store i32 [[TMP33]], i32* [[TMP32]], align 4
3872 // CHECK3-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
3873 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3874 // CHECK3-NEXT:    store i32 [[TMP35]], i32* [[TMP34]], align 4
3875 // CHECK3-NEXT:    [[TMP36:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 72, i32 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
3876 // CHECK3-NEXT:    [[TMP37:%.*]] = bitcast i8* [[TMP36]] to %struct.kmp_task_t_with_privates*
3877 // CHECK3-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP37]], i32 0, i32 0
3878 // CHECK3-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP38]], i32 0, i32 0
3879 // CHECK3-NEXT:    [[TMP40:%.*]] = load i8*, i8** [[TMP39]], align 4
3880 // CHECK3-NEXT:    [[TMP41:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
3881 // CHECK3-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP40]], i8* align 4 [[TMP41]], i32 12, i1 false)
3882 // CHECK3-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP37]], i32 0, i32 1
3883 // CHECK3-NEXT:    [[TMP43:%.*]] = bitcast i8* [[TMP40]] to %struct.anon*
3884 // CHECK3-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 0
3885 // CHECK3-NEXT:    [[TMP45:%.*]] = bitcast [3 x i64]* [[TMP44]] to i8*
3886 // CHECK3-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP45]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes to i8*), i32 24, i1 false)
3887 // CHECK3-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 1
3888 // CHECK3-NEXT:    [[TMP47:%.*]] = bitcast [3 x i8*]* [[TMP46]] to i8*
3889 // CHECK3-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP28]] to i8*
3890 // CHECK3-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP47]], i8* align 4 [[TMP48]], i32 12, i1 false)
3891 // CHECK3-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 2
3892 // CHECK3-NEXT:    [[TMP50:%.*]] = bitcast [3 x i8*]* [[TMP49]] to i8*
3893 // CHECK3-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP29]] to i8*
3894 // CHECK3-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP50]], i8* align 4 [[TMP51]], i32 12, i1 false)
3895 // CHECK3-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 3
3896 // CHECK3-NEXT:    [[TMP53:%.*]] = load i16, i16* [[AA]], align 2
3897 // CHECK3-NEXT:    store i16 [[TMP53]], i16* [[TMP52]], align 4
3898 // CHECK3-NEXT:    [[TMP54:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP36]])
3899 // CHECK3-NEXT:    [[TMP55:%.*]] = load i32, i32* [[A]], align 4
3900 // CHECK3-NEXT:    store i32 [[TMP55]], i32* [[A_CASTED]], align 4
3901 // CHECK3-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A_CASTED]], align 4
3902 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i32 [[TMP56]]) #[[ATTR4:[0-9]+]]
3903 // CHECK3-NEXT:    [[TMP57:%.*]] = load i16, i16* [[AA]], align 2
3904 // CHECK3-NEXT:    [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
3905 // CHECK3-NEXT:    store i16 [[TMP57]], i16* [[CONV5]], align 2
3906 // CHECK3-NEXT:    [[TMP58:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
3907 // CHECK3-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
3908 // CHECK3-NEXT:    [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i32*
3909 // CHECK3-NEXT:    store i32 [[TMP58]], i32* [[TMP60]], align 4
3910 // CHECK3-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
3911 // CHECK3-NEXT:    [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32*
3912 // CHECK3-NEXT:    store i32 [[TMP58]], i32* [[TMP62]], align 4
3913 // CHECK3-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
3914 // CHECK3-NEXT:    store i8* null, i8** [[TMP63]], align 4
3915 // CHECK3-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
3916 // CHECK3-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
3917 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
3918 // CHECK3-NEXT:    [[TMP66:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111.region_id, i32 1, i8** [[TMP64]], i8** [[TMP65]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
3919 // CHECK3-NEXT:    [[TMP67:%.*]] = icmp ne i32 [[TMP66]], 0
3920 // CHECK3-NEXT:    br i1 [[TMP67]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3921 // CHECK3:       omp_offload.failed:
3922 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111(i32 [[TMP58]]) #[[ATTR4]]
3923 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3924 // CHECK3:       omp_offload.cont:
3925 // CHECK3-NEXT:    [[TMP68:%.*]] = load i32, i32* [[A]], align 4
3926 // CHECK3-NEXT:    store i32 [[TMP68]], i32* [[A_CASTED9]], align 4
3927 // CHECK3-NEXT:    [[TMP69:%.*]] = load i32, i32* [[A_CASTED9]], align 4
3928 // CHECK3-NEXT:    [[TMP70:%.*]] = load i16, i16* [[AA]], align 2
3929 // CHECK3-NEXT:    [[CONV11:%.*]] = bitcast i32* [[AA_CASTED10]] to i16*
3930 // CHECK3-NEXT:    store i16 [[TMP70]], i16* [[CONV11]], align 2
3931 // CHECK3-NEXT:    [[TMP71:%.*]] = load i32, i32* [[AA_CASTED10]], align 4
3932 // CHECK3-NEXT:    [[TMP72:%.*]] = load i32, i32* [[N_ADDR]], align 4
3933 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP72]], 10
3934 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
3935 // CHECK3:       omp_if.then:
3936 // CHECK3-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 0
3937 // CHECK3-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
3938 // CHECK3-NEXT:    store i32 [[TMP69]], i32* [[TMP74]], align 4
3939 // CHECK3-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 0
3940 // CHECK3-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32*
3941 // CHECK3-NEXT:    store i32 [[TMP69]], i32* [[TMP76]], align 4
3942 // CHECK3-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS14]], i32 0, i32 0
3943 // CHECK3-NEXT:    store i8* null, i8** [[TMP77]], align 4
3944 // CHECK3-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 1
3945 // CHECK3-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
3946 // CHECK3-NEXT:    store i32 [[TMP71]], i32* [[TMP79]], align 4
3947 // CHECK3-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 1
3948 // CHECK3-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
3949 // CHECK3-NEXT:    store i32 [[TMP71]], i32* [[TMP81]], align 4
3950 // CHECK3-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS14]], i32 0, i32 1
3951 // CHECK3-NEXT:    store i8* null, i8** [[TMP82]], align 4
3952 // CHECK3-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 0
3953 // CHECK3-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 0
3954 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
3955 // CHECK3-NEXT:    [[TMP85:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118.region_id, i32 2, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.7, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
3956 // CHECK3-NEXT:    [[TMP86:%.*]] = icmp ne i32 [[TMP85]], 0
3957 // CHECK3-NEXT:    br i1 [[TMP86]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
3958 // CHECK3:       omp_offload.failed16:
3959 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i32 [[TMP69]], i32 [[TMP71]]) #[[ATTR4]]
3960 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT17]]
3961 // CHECK3:       omp_offload.cont17:
3962 // CHECK3-NEXT:    br label [[OMP_IF_END:%.*]]
3963 // CHECK3:       omp_if.else:
3964 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i32 [[TMP69]], i32 [[TMP71]]) #[[ATTR4]]
3965 // CHECK3-NEXT:    br label [[OMP_IF_END]]
3966 // CHECK3:       omp_if.end:
3967 // CHECK3-NEXT:    [[TMP87:%.*]] = load i32, i32* [[A]], align 4
3968 // CHECK3-NEXT:    store i32 [[TMP87]], i32* [[A_CASTED18]], align 4
3969 // CHECK3-NEXT:    [[TMP88:%.*]] = load i32, i32* [[A_CASTED18]], align 4
3970 // CHECK3-NEXT:    [[TMP89:%.*]] = load i32, i32* [[N_ADDR]], align 4
3971 // CHECK3-NEXT:    [[CMP19:%.*]] = icmp sgt i32 [[TMP89]], 20
3972 // CHECK3-NEXT:    br i1 [[CMP19]], label [[OMP_IF_THEN20:%.*]], label [[OMP_IF_ELSE27:%.*]]
3973 // CHECK3:       omp_if.then20:
3974 // CHECK3-NEXT:    [[TMP90:%.*]] = mul nuw i32 [[TMP1]], 4
3975 // CHECK3-NEXT:    [[TMP91:%.*]] = sext i32 [[TMP90]] to i64
3976 // CHECK3-NEXT:    [[TMP92:%.*]] = mul nuw i32 5, [[TMP3]]
3977 // CHECK3-NEXT:    [[TMP93:%.*]] = mul nuw i32 [[TMP92]], 8
3978 // CHECK3-NEXT:    [[TMP94:%.*]] = sext i32 [[TMP93]] to i64
3979 // CHECK3-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
3980 // CHECK3-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32*
3981 // CHECK3-NEXT:    store i32 [[TMP88]], i32* [[TMP96]], align 4
3982 // CHECK3-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
3983 // CHECK3-NEXT:    [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32*
3984 // CHECK3-NEXT:    store i32 [[TMP88]], i32* [[TMP98]], align 4
3985 // CHECK3-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
3986 // CHECK3-NEXT:    store i64 4, i64* [[TMP99]], align 4
3987 // CHECK3-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0
3988 // CHECK3-NEXT:    store i8* null, i8** [[TMP100]], align 4
3989 // CHECK3-NEXT:    [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
3990 // CHECK3-NEXT:    [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]**
3991 // CHECK3-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 4
3992 // CHECK3-NEXT:    [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
3993 // CHECK3-NEXT:    [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]**
3994 // CHECK3-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4
3995 // CHECK3-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
3996 // CHECK3-NEXT:    store i64 40, i64* [[TMP105]], align 4
3997 // CHECK3-NEXT:    [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1
3998 // CHECK3-NEXT:    store i8* null, i8** [[TMP106]], align 4
3999 // CHECK3-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2
4000 // CHECK3-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32*
4001 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP108]], align 4
4002 // CHECK3-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2
4003 // CHECK3-NEXT:    [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32*
4004 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP110]], align 4
4005 // CHECK3-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
4006 // CHECK3-NEXT:    store i64 4, i64* [[TMP111]], align 4
4007 // CHECK3-NEXT:    [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 2
4008 // CHECK3-NEXT:    store i8* null, i8** [[TMP112]], align 4
4009 // CHECK3-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3
4010 // CHECK3-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float**
4011 // CHECK3-NEXT:    store float* [[VLA]], float** [[TMP114]], align 4
4012 // CHECK3-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3
4013 // CHECK3-NEXT:    [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float**
4014 // CHECK3-NEXT:    store float* [[VLA]], float** [[TMP116]], align 4
4015 // CHECK3-NEXT:    [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
4016 // CHECK3-NEXT:    store i64 [[TMP91]], i64* [[TMP117]], align 4
4017 // CHECK3-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 3
4018 // CHECK3-NEXT:    store i8* null, i8** [[TMP118]], align 4
4019 // CHECK3-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4
4020 // CHECK3-NEXT:    [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]**
4021 // CHECK3-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 4
4022 // CHECK3-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4
4023 // CHECK3-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]**
4024 // CHECK3-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 4
4025 // CHECK3-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
4026 // CHECK3-NEXT:    store i64 400, i64* [[TMP123]], align 4
4027 // CHECK3-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 4
4028 // CHECK3-NEXT:    store i8* null, i8** [[TMP124]], align 4
4029 // CHECK3-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 5
4030 // CHECK3-NEXT:    [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32*
4031 // CHECK3-NEXT:    store i32 5, i32* [[TMP126]], align 4
4032 // CHECK3-NEXT:    [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 5
4033 // CHECK3-NEXT:    [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32*
4034 // CHECK3-NEXT:    store i32 5, i32* [[TMP128]], align 4
4035 // CHECK3-NEXT:    [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
4036 // CHECK3-NEXT:    store i64 4, i64* [[TMP129]], align 4
4037 // CHECK3-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 5
4038 // CHECK3-NEXT:    store i8* null, i8** [[TMP130]], align 4
4039 // CHECK3-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 6
4040 // CHECK3-NEXT:    [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32*
4041 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[TMP132]], align 4
4042 // CHECK3-NEXT:    [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 6
4043 // CHECK3-NEXT:    [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32*
4044 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[TMP134]], align 4
4045 // CHECK3-NEXT:    [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
4046 // CHECK3-NEXT:    store i64 4, i64* [[TMP135]], align 4
4047 // CHECK3-NEXT:    [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 6
4048 // CHECK3-NEXT:    store i8* null, i8** [[TMP136]], align 4
4049 // CHECK3-NEXT:    [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 7
4050 // CHECK3-NEXT:    [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double**
4051 // CHECK3-NEXT:    store double* [[VLA1]], double** [[TMP138]], align 4
4052 // CHECK3-NEXT:    [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 7
4053 // CHECK3-NEXT:    [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double**
4054 // CHECK3-NEXT:    store double* [[VLA1]], double** [[TMP140]], align 4
4055 // CHECK3-NEXT:    [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
4056 // CHECK3-NEXT:    store i64 [[TMP94]], i64* [[TMP141]], align 4
4057 // CHECK3-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 7
4058 // CHECK3-NEXT:    store i8* null, i8** [[TMP142]], align 4
4059 // CHECK3-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 8
4060 // CHECK3-NEXT:    [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT**
4061 // CHECK3-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 4
4062 // CHECK3-NEXT:    [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 8
4063 // CHECK3-NEXT:    [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT**
4064 // CHECK3-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 4
4065 // CHECK3-NEXT:    [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
4066 // CHECK3-NEXT:    store i64 12, i64* [[TMP147]], align 4
4067 // CHECK3-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 8
4068 // CHECK3-NEXT:    store i8* null, i8** [[TMP148]], align 4
4069 // CHECK3-NEXT:    [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
4070 // CHECK3-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
4071 // CHECK3-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
4072 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
4073 // CHECK3-NEXT:    [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
4074 // CHECK3-NEXT:    [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0
4075 // CHECK3-NEXT:    br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]]
4076 // CHECK3:       omp_offload.failed25:
4077 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
4078 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT26]]
4079 // CHECK3:       omp_offload.cont26:
4080 // CHECK3-NEXT:    br label [[OMP_IF_END28:%.*]]
4081 // CHECK3:       omp_if.else27:
4082 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
4083 // CHECK3-NEXT:    br label [[OMP_IF_END28]]
4084 // CHECK3:       omp_if.end28:
4085 // CHECK3-NEXT:    [[TMP154:%.*]] = load i32, i32* [[A]], align 4
4086 // CHECK3-NEXT:    [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
4087 // CHECK3-NEXT:    call void @llvm.stackrestore(i8* [[TMP155]])
4088 // CHECK3-NEXT:    ret i32 [[TMP154]]
4089 //
4090 //
4091 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
4092 // CHECK3-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], i32 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2:[0-9]+]] {
4093 // CHECK3-NEXT:  entry:
4094 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
4095 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
4096 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4
4097 // CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
4098 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
4099 // CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
4100 // CHECK3-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
4101 // CHECK3-NEXT:    store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
4102 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
4103 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
4104 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
4105 // CHECK3-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
4106 // CHECK3-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
4107 // CHECK3-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
4108 // CHECK3-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
4109 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
4110 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP4]])
4111 // CHECK3-NEXT:    ret void
4112 //
4113 //
4114 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
4115 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3:[0-9]+]] {
4116 // CHECK3-NEXT:  entry:
4117 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4118 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4119 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
4120 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4121 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4122 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4123 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4124 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4125 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4126 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
4127 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4128 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4129 // CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
4130 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
4131 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4132 // CHECK3-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
4133 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4134 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4135 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4136 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
4137 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4138 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4139 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
4140 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4141 // CHECK3:       cond.true:
4142 // CHECK3-NEXT:    br label [[COND_END:%.*]]
4143 // CHECK3:       cond.false:
4144 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4145 // CHECK3-NEXT:    br label [[COND_END]]
4146 // CHECK3:       cond.end:
4147 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
4148 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4149 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4150 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
4151 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4152 // CHECK3:       omp.inner.for.cond:
4153 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
4154 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
4155 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
4156 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4157 // CHECK3:       omp.inner.for.body:
4158 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
4159 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
4160 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4161 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
4162 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4163 // CHECK3:       omp.body.continue:
4164 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4165 // CHECK3:       omp.inner.for.inc:
4166 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
4167 // CHECK3-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
4168 // CHECK3-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
4169 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
4170 // CHECK3:       omp.inner.for.end:
4171 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4172 // CHECK3:       omp.loop.exit:
4173 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
4174 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4175 // CHECK3-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
4176 // CHECK3-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4177 // CHECK3:       .omp.final.then:
4178 // CHECK3-NEXT:    store i32 10, i32* [[I]], align 4
4179 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4180 // CHECK3:       .omp.final.done:
4181 // CHECK3-NEXT:    ret void
4182 //
4183 //
4184 // CHECK3-LABEL: define {{[^@]+}}@.omp_task_privates_map.
4185 // CHECK3-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], i16** noalias noundef [[TMP1:%.*]], [3 x i8*]** noalias noundef [[TMP2:%.*]], [3 x i8*]** noalias noundef [[TMP3:%.*]], [3 x i64]** noalias noundef [[TMP4:%.*]]) #[[ATTR5:[0-9]+]] {
4186 // CHECK3-NEXT:  entry:
4187 // CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 4
4188 // CHECK3-NEXT:    [[DOTADDR1:%.*]] = alloca i16**, align 4
4189 // CHECK3-NEXT:    [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 4
4190 // CHECK3-NEXT:    [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 4
4191 // CHECK3-NEXT:    [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 4
4192 // CHECK3-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 4
4193 // CHECK3-NEXT:    store i16** [[TMP1]], i16*** [[DOTADDR1]], align 4
4194 // CHECK3-NEXT:    store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 4
4195 // CHECK3-NEXT:    store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 4
4196 // CHECK3-NEXT:    store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 4
4197 // CHECK3-NEXT:    [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 4
4198 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0
4199 // CHECK3-NEXT:    [[TMP7:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 4
4200 // CHECK3-NEXT:    store [3 x i64]* [[TMP6]], [3 x i64]** [[TMP7]], align 4
4201 // CHECK3-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1
4202 // CHECK3-NEXT:    [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 4
4203 // CHECK3-NEXT:    store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 4
4204 // CHECK3-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2
4205 // CHECK3-NEXT:    [[TMP11:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 4
4206 // CHECK3-NEXT:    store [3 x i8*]* [[TMP10]], [3 x i8*]** [[TMP11]], align 4
4207 // CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3
4208 // CHECK3-NEXT:    [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 4
4209 // CHECK3-NEXT:    store i16* [[TMP12]], i16** [[TMP13]], align 4
4210 // CHECK3-NEXT:    ret void
4211 //
4212 //
4213 // CHECK3-LABEL: define {{[^@]+}}@.omp_task_entry.
4214 // CHECK3-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR6:[0-9]+]] {
4215 // CHECK3-NEXT:  entry:
4216 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
4217 // CHECK3-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
4218 // CHECK3-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
4219 // CHECK3-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
4220 // CHECK3-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
4221 // CHECK3-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
4222 // CHECK3-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 4
4223 // CHECK3-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 4
4224 // CHECK3-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 4
4225 // CHECK3-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 4
4226 // CHECK3-NEXT:    [[AA_CASTED_I:%.*]] = alloca i32, align 4
4227 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED_I:%.*]] = alloca i32, align 4
4228 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED4_I:%.*]] = alloca i32, align 4
4229 // CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
4230 // CHECK3-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
4231 // CHECK3-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
4232 // CHECK3-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
4233 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
4234 // CHECK3-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
4235 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
4236 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
4237 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
4238 // CHECK3-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
4239 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
4240 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
4241 // CHECK3-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
4242 // CHECK3-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
4243 // CHECK3-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]])
4244 // CHECK3-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
4245 // CHECK3-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
4246 // CHECK3-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]])
4247 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !27
4248 // CHECK3-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !27
4249 // CHECK3-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27
4250 // CHECK3-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27
4251 // CHECK3-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !27
4252 // CHECK3-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !27
4253 // CHECK3-NEXT:    [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !27
4254 // CHECK3-NEXT:    [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27
4255 // CHECK3-NEXT:    [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27
4256 // CHECK3-NEXT:    [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)*
4257 // CHECK3-NEXT:    call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
4258 // CHECK3-NEXT:    [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !27
4259 // CHECK3-NEXT:    [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !27
4260 // CHECK3-NEXT:    [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !27
4261 // CHECK3-NEXT:    [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !27
4262 // CHECK3-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i32 0, i32 0
4263 // CHECK3-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i32 0, i32 0
4264 // CHECK3-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i32 0, i32 0
4265 // CHECK3-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
4266 // CHECK3-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
4267 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[TMP23]], align 4
4268 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) #[[ATTR4]]
4269 // CHECK3-NEXT:    [[TMP26:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 [[TMP25]], i32 1, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
4270 // CHECK3-NEXT:    [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
4271 // CHECK3-NEXT:    br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
4272 // CHECK3:       omp_offload.failed.i:
4273 // CHECK3-NEXT:    [[TMP28:%.*]] = load i16, i16* [[TMP16]], align 2
4274 // CHECK3-NEXT:    [[CONV_I:%.*]] = bitcast i32* [[AA_CASTED_I]] to i16*
4275 // CHECK3-NEXT:    store i16 [[TMP28]], i16* [[CONV_I]], align 2, !noalias !27
4276 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[AA_CASTED_I]], align 4, !noalias !27
4277 // CHECK3-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP23]], align 4
4278 // CHECK3-NEXT:    store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !27
4279 // CHECK3-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !27
4280 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP24]], align 4
4281 // CHECK3-NEXT:    store i32 [[TMP32]], i32* [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !27
4282 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !27
4283 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97(i32 [[TMP29]], i32 [[TMP31]], i32 [[TMP33]]) #[[ATTR4]]
4284 // CHECK3-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
4285 // CHECK3:       .omp_outlined..1.exit:
4286 // CHECK3-NEXT:    ret i32 0
4287 //
4288 //
4289 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104
4290 // CHECK3-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
4291 // CHECK3-NEXT:  entry:
4292 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
4293 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
4294 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
4295 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
4296 // CHECK3-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
4297 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
4298 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]])
4299 // CHECK3-NEXT:    ret void
4300 //
4301 //
4302 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2
4303 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
4304 // CHECK3-NEXT:  entry:
4305 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4306 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4307 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
4308 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4309 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4310 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4311 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4312 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4313 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4314 // CHECK3-NEXT:    [[A1:%.*]] = alloca i32, align 4
4315 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4316 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4317 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
4318 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4319 // CHECK3-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
4320 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4321 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4322 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4323 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
4324 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4325 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4326 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
4327 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4328 // CHECK3:       cond.true:
4329 // CHECK3-NEXT:    br label [[COND_END:%.*]]
4330 // CHECK3:       cond.false:
4331 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4332 // CHECK3-NEXT:    br label [[COND_END]]
4333 // CHECK3:       cond.end:
4334 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
4335 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4336 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4337 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
4338 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4339 // CHECK3:       omp.inner.for.cond:
4340 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4341 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4342 // CHECK3-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
4343 // CHECK3-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4344 // CHECK3:       omp.inner.for.body:
4345 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4346 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
4347 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4348 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[A1]], align 4
4349 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A1]], align 4
4350 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
4351 // CHECK3-NEXT:    store i32 [[ADD3]], i32* [[A1]], align 4
4352 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4353 // CHECK3:       omp.body.continue:
4354 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4355 // CHECK3:       omp.inner.for.inc:
4356 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4357 // CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1
4358 // CHECK3-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
4359 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
4360 // CHECK3:       omp.inner.for.end:
4361 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4362 // CHECK3:       omp.loop.exit:
4363 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
4364 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4365 // CHECK3-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
4366 // CHECK3-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4367 // CHECK3:       .omp.final.then:
4368 // CHECK3-NEXT:    store i32 10, i32* [[A_ADDR]], align 4
4369 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4370 // CHECK3:       .omp.final.done:
4371 // CHECK3-NEXT:    ret void
4372 //
4373 //
4374 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
4375 // CHECK3-SAME: (i32 noundef [[AA:%.*]]) #[[ATTR2]] {
4376 // CHECK3-NEXT:  entry:
4377 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
4378 // CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
4379 // CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
4380 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
4381 // CHECK3-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
4382 // CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
4383 // CHECK3-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
4384 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
4385 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]])
4386 // CHECK3-NEXT:    ret void
4387 //
4388 //
4389 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
4390 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3]] {
4391 // CHECK3-NEXT:  entry:
4392 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4393 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4394 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
4395 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4396 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4397 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4398 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4399 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4400 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4401 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
4402 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4403 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4404 // CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
4405 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
4406 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4407 // CHECK3-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
4408 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4409 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4410 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4411 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
4412 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4413 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4414 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
4415 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4416 // CHECK3:       cond.true:
4417 // CHECK3-NEXT:    br label [[COND_END:%.*]]
4418 // CHECK3:       cond.false:
4419 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4420 // CHECK3-NEXT:    br label [[COND_END]]
4421 // CHECK3:       cond.end:
4422 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
4423 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4424 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4425 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
4426 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4427 // CHECK3:       omp.inner.for.cond:
4428 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
4429 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !30
4430 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
4431 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4432 // CHECK3:       omp.inner.for.body:
4433 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
4434 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
4435 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4436 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !30
4437 // CHECK3-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !30
4438 // CHECK3-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
4439 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
4440 // CHECK3-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
4441 // CHECK3-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !30
4442 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4443 // CHECK3:       omp.body.continue:
4444 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4445 // CHECK3:       omp.inner.for.inc:
4446 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
4447 // CHECK3-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
4448 // CHECK3-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
4449 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
4450 // CHECK3:       omp.inner.for.end:
4451 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4452 // CHECK3:       omp.loop.exit:
4453 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
4454 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4455 // CHECK3-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
4456 // CHECK3-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4457 // CHECK3:       .omp.final.then:
4458 // CHECK3-NEXT:    store i32 10, i32* [[I]], align 4
4459 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4460 // CHECK3:       .omp.final.done:
4461 // CHECK3-NEXT:    ret void
4462 //
4463 //
4464 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
4465 // CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR2]] {
4466 // CHECK3-NEXT:  entry:
4467 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
4468 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
4469 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
4470 // CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
4471 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
4472 // CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
4473 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
4474 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
4475 // CHECK3-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
4476 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
4477 // CHECK3-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
4478 // CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
4479 // CHECK3-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
4480 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
4481 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
4482 // CHECK3-NEXT:    ret void
4483 //
4484 //
4485 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..6
4486 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3]] {
4487 // CHECK3-NEXT:  entry:
4488 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4489 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4490 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
4491 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
4492 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4493 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4494 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4495 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4496 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4497 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4498 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
4499 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4500 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4501 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
4502 // CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
4503 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
4504 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4505 // CHECK3-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
4506 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4507 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4508 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4509 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
4510 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4511 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4512 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
4513 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4514 // CHECK3:       cond.true:
4515 // CHECK3-NEXT:    br label [[COND_END:%.*]]
4516 // CHECK3:       cond.false:
4517 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4518 // CHECK3-NEXT:    br label [[COND_END]]
4519 // CHECK3:       cond.end:
4520 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
4521 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4522 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4523 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
4524 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4525 // CHECK3:       omp.inner.for.cond:
4526 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
4527 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
4528 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
4529 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4530 // CHECK3:       omp.inner.for.body:
4531 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
4532 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
4533 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4534 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !33
4535 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !33
4536 // CHECK3-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
4537 // CHECK3-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !33
4538 // CHECK3-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !33
4539 // CHECK3-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP9]] to i32
4540 // CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
4541 // CHECK3-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
4542 // CHECK3-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !33
4543 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4544 // CHECK3:       omp.body.continue:
4545 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4546 // CHECK3:       omp.inner.for.inc:
4547 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
4548 // CHECK3-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1
4549 // CHECK3-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
4550 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
4551 // CHECK3:       omp.inner.for.end:
4552 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4553 // CHECK3:       omp.loop.exit:
4554 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
4555 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4556 // CHECK3-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
4557 // CHECK3-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4558 // CHECK3:       .omp.final.then:
4559 // CHECK3-NEXT:    store i32 10, i32* [[I]], align 4
4560 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4561 // CHECK3:       .omp.final.done:
4562 // CHECK3-NEXT:    ret void
4563 //
4564 //
4565 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
4566 // CHECK3-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR2]] {
4567 // CHECK3-NEXT:  entry:
4568 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
4569 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
4570 // CHECK3-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
4571 // CHECK3-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
4572 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
4573 // CHECK3-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
4574 // CHECK3-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
4575 // CHECK3-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
4576 // CHECK3-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
4577 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
4578 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
4579 // CHECK3-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
4580 // CHECK3-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
4581 // CHECK3-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
4582 // CHECK3-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
4583 // CHECK3-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
4584 // CHECK3-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
4585 // CHECK3-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
4586 // CHECK3-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
4587 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
4588 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
4589 // CHECK3-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
4590 // CHECK3-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
4591 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
4592 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
4593 // CHECK3-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
4594 // CHECK3-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
4595 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
4596 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
4597 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
4598 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
4599 // CHECK3-NEXT:    ret void
4600 //
4601 //
4602 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..9
4603 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR3]] {
4604 // CHECK3-NEXT:  entry:
4605 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4606 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4607 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
4608 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
4609 // CHECK3-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
4610 // CHECK3-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
4611 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
4612 // CHECK3-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
4613 // CHECK3-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
4614 // CHECK3-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
4615 // CHECK3-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
4616 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4617 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4618 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4619 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4620 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4621 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4622 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
4623 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4624 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4625 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
4626 // CHECK3-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
4627 // CHECK3-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
4628 // CHECK3-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
4629 // CHECK3-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
4630 // CHECK3-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
4631 // CHECK3-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
4632 // CHECK3-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
4633 // CHECK3-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
4634 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
4635 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
4636 // CHECK3-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
4637 // CHECK3-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
4638 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
4639 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
4640 // CHECK3-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
4641 // CHECK3-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
4642 // CHECK3-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 0
4643 // CHECK3-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
4644 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4645 // CHECK3-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
4646 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4647 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4648 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4649 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
4650 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4651 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4652 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
4653 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4654 // CHECK3:       cond.true:
4655 // CHECK3-NEXT:    br label [[COND_END:%.*]]
4656 // CHECK3:       cond.false:
4657 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4658 // CHECK3-NEXT:    br label [[COND_END]]
4659 // CHECK3:       cond.end:
4660 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
4661 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4662 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4663 // CHECK3-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
4664 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4665 // CHECK3:       omp.inner.for.cond:
4666 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
4667 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
4668 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
4669 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4670 // CHECK3:       omp.inner.for.body:
4671 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
4672 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
4673 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4674 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !36
4675 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !36
4676 // CHECK3-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
4677 // CHECK3-NEXT:    store i32 [[ADD6]], i32* [[A_ADDR]], align 4, !llvm.access.group !36
4678 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
4679 // CHECK3-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !36
4680 // CHECK3-NEXT:    [[CONV:%.*]] = fpext float [[TMP17]] to double
4681 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00
4682 // CHECK3-NEXT:    [[CONV8:%.*]] = fptrunc double [[ADD7]] to float
4683 // CHECK3-NEXT:    store float [[CONV8]], float* [[ARRAYIDX]], align 4, !llvm.access.group !36
4684 // CHECK3-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
4685 // CHECK3-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX9]], align 4, !llvm.access.group !36
4686 // CHECK3-NEXT:    [[CONV10:%.*]] = fpext float [[TMP18]] to double
4687 // CHECK3-NEXT:    [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00
4688 // CHECK3-NEXT:    [[CONV12:%.*]] = fptrunc double [[ADD11]] to float
4689 // CHECK3-NEXT:    store float [[CONV12]], float* [[ARRAYIDX9]], align 4, !llvm.access.group !36
4690 // CHECK3-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
4691 // CHECK3-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX13]], i32 0, i32 2
4692 // CHECK3-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX14]], align 8, !llvm.access.group !36
4693 // CHECK3-NEXT:    [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00
4694 // CHECK3-NEXT:    store double [[ADD15]], double* [[ARRAYIDX14]], align 8, !llvm.access.group !36
4695 // CHECK3-NEXT:    [[TMP20:%.*]] = mul nsw i32 1, [[TMP5]]
4696 // CHECK3-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP20]]
4697 // CHECK3-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX16]], i32 3
4698 // CHECK3-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !36
4699 // CHECK3-NEXT:    [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00
4700 // CHECK3-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !36
4701 // CHECK3-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
4702 // CHECK3-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !36
4703 // CHECK3-NEXT:    [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1
4704 // CHECK3-NEXT:    store i64 [[ADD19]], i64* [[X]], align 4, !llvm.access.group !36
4705 // CHECK3-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
4706 // CHECK3-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !36
4707 // CHECK3-NEXT:    [[CONV20:%.*]] = sext i8 [[TMP23]] to i32
4708 // CHECK3-NEXT:    [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1
4709 // CHECK3-NEXT:    [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8
4710 // CHECK3-NEXT:    store i8 [[CONV22]], i8* [[Y]], align 4, !llvm.access.group !36
4711 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4712 // CHECK3:       omp.body.continue:
4713 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4714 // CHECK3:       omp.inner.for.inc:
4715 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
4716 // CHECK3-NEXT:    [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1
4717 // CHECK3-NEXT:    store i32 [[ADD23]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
4718 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
4719 // CHECK3:       omp.inner.for.end:
4720 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4721 // CHECK3:       omp.loop.exit:
4722 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
4723 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4724 // CHECK3-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
4725 // CHECK3-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4726 // CHECK3:       .omp.final.then:
4727 // CHECK3-NEXT:    store i32 10, i32* [[I]], align 4
4728 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4729 // CHECK3:       .omp.final.done:
4730 // CHECK3-NEXT:    ret void
4731 //
4732 //
4733 // CHECK3-LABEL: define {{[^@]+}}@_Z3bari
4734 // CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
4735 // CHECK3-NEXT:  entry:
4736 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4737 // CHECK3-NEXT:    [[A:%.*]] = alloca i32, align 4
4738 // CHECK3-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
4739 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4740 // CHECK3-NEXT:    store i32 0, i32* [[A]], align 4
4741 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
4742 // CHECK3-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
4743 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
4744 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
4745 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
4746 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
4747 // CHECK3-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef [[TMP2]])
4748 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
4749 // CHECK3-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
4750 // CHECK3-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
4751 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
4752 // CHECK3-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
4753 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
4754 // CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
4755 // CHECK3-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
4756 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
4757 // CHECK3-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
4758 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
4759 // CHECK3-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
4760 // CHECK3-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
4761 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
4762 // CHECK3-NEXT:    ret i32 [[TMP8]]
4763 //
4764 //
4765 // CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
4766 // CHECK3-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
4767 // CHECK3-NEXT:  entry:
4768 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
4769 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4770 // CHECK3-NEXT:    [[B:%.*]] = alloca i32, align 4
4771 // CHECK3-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
4772 // CHECK3-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
4773 // CHECK3-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
4774 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
4775 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
4776 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
4777 // CHECK3-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4
4778 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4779 // CHECK3-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
4780 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4781 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
4782 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
4783 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
4784 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
4785 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
4786 // CHECK3-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
4787 // CHECK3-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
4788 // CHECK3-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
4789 // CHECK3-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
4790 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
4791 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B]], align 4
4792 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
4793 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
4794 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
4795 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60
4796 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
4797 // CHECK3:       omp_if.then:
4798 // CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
4799 // CHECK3-NEXT:    [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]]
4800 // CHECK3-NEXT:    [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2
4801 // CHECK3-NEXT:    [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
4802 // CHECK3-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4803 // CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
4804 // CHECK3-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 4
4805 // CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4806 // CHECK3-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
4807 // CHECK3-NEXT:    store double* [[A]], double** [[TMP13]], align 4
4808 // CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
4809 // CHECK3-NEXT:    store i64 8, i64* [[TMP14]], align 4
4810 // CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
4811 // CHECK3-NEXT:    store i8* null, i8** [[TMP15]], align 4
4812 // CHECK3-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
4813 // CHECK3-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
4814 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[TMP17]], align 4
4815 // CHECK3-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
4816 // CHECK3-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
4817 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[TMP19]], align 4
4818 // CHECK3-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
4819 // CHECK3-NEXT:    store i64 4, i64* [[TMP20]], align 4
4820 // CHECK3-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
4821 // CHECK3-NEXT:    store i8* null, i8** [[TMP21]], align 4
4822 // CHECK3-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
4823 // CHECK3-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
4824 // CHECK3-NEXT:    store i32 2, i32* [[TMP23]], align 4
4825 // CHECK3-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
4826 // CHECK3-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
4827 // CHECK3-NEXT:    store i32 2, i32* [[TMP25]], align 4
4828 // CHECK3-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
4829 // CHECK3-NEXT:    store i64 4, i64* [[TMP26]], align 4
4830 // CHECK3-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
4831 // CHECK3-NEXT:    store i8* null, i8** [[TMP27]], align 4
4832 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
4833 // CHECK3-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
4834 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP29]], align 4
4835 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
4836 // CHECK3-NEXT:    [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32*
4837 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP31]], align 4
4838 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
4839 // CHECK3-NEXT:    store i64 4, i64* [[TMP32]], align 4
4840 // CHECK3-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
4841 // CHECK3-NEXT:    store i8* null, i8** [[TMP33]], align 4
4842 // CHECK3-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
4843 // CHECK3-NEXT:    [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
4844 // CHECK3-NEXT:    store i16* [[VLA]], i16** [[TMP35]], align 4
4845 // CHECK3-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
4846 // CHECK3-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
4847 // CHECK3-NEXT:    store i16* [[VLA]], i16** [[TMP37]], align 4
4848 // CHECK3-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
4849 // CHECK3-NEXT:    store i64 [[TMP9]], i64* [[TMP38]], align 4
4850 // CHECK3-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
4851 // CHECK3-NEXT:    store i8* null, i8** [[TMP39]], align 4
4852 // CHECK3-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4853 // CHECK3-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4854 // CHECK3-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
4855 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
4856 // CHECK3-NEXT:    [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
4857 // CHECK3-NEXT:    [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
4858 // CHECK3-NEXT:    br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
4859 // CHECK3:       omp_offload.failed:
4860 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]]
4861 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
4862 // CHECK3:       omp_offload.cont:
4863 // CHECK3-NEXT:    br label [[OMP_IF_END:%.*]]
4864 // CHECK3:       omp_if.else:
4865 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]]
4866 // CHECK3-NEXT:    br label [[OMP_IF_END]]
4867 // CHECK3:       omp_if.end:
4868 // CHECK3-NEXT:    [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]]
4869 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]]
4870 // CHECK3-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
4871 // CHECK3-NEXT:    [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
4872 // CHECK3-NEXT:    [[CONV:%.*]] = sext i16 [[TMP46]] to i32
4873 // CHECK3-NEXT:    [[TMP47:%.*]] = load i32, i32* [[B]], align 4
4874 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]]
4875 // CHECK3-NEXT:    [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
4876 // CHECK3-NEXT:    call void @llvm.stackrestore(i8* [[TMP48]])
4877 // CHECK3-NEXT:    ret i32 [[ADD3]]
4878 //
4879 //
4880 // CHECK3-LABEL: define {{[^@]+}}@_ZL7fstatici
4881 // CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
4882 // CHECK3-NEXT:  entry:
4883 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4884 // CHECK3-NEXT:    [[A:%.*]] = alloca i32, align 4
4885 // CHECK3-NEXT:    [[AA:%.*]] = alloca i16, align 2
4886 // CHECK3-NEXT:    [[AAA:%.*]] = alloca i8, align 1
4887 // CHECK3-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
4888 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
4889 // CHECK3-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
4890 // CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
4891 // CHECK3-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
4892 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
4893 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
4894 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
4895 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4896 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4897 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
4898 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
4899 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4900 // CHECK3-NEXT:    store i32 0, i32* [[A]], align 4
4901 // CHECK3-NEXT:    store i16 0, i16* [[AA]], align 2
4902 // CHECK3-NEXT:    store i8 0, i8* [[AAA]], align 1
4903 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
4904 // CHECK3-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
4905 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
4906 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
4907 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
4908 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
4909 // CHECK3-NEXT:    [[TMP4:%.*]] = load i16, i16* [[AA]], align 2
4910 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
4911 // CHECK3-NEXT:    store i16 [[TMP4]], i16* [[CONV]], align 2
4912 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[AA_CASTED]], align 4
4913 // CHECK3-NEXT:    [[TMP6:%.*]] = load i8, i8* [[AAA]], align 1
4914 // CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
4915 // CHECK3-NEXT:    store i8 [[TMP6]], i8* [[CONV1]], align 1
4916 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
4917 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[N_ADDR]], align 4
4918 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 50
4919 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
4920 // CHECK3:       omp_if.then:
4921 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4922 // CHECK3-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
4923 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP10]], align 4
4924 // CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4925 // CHECK3-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32*
4926 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP12]], align 4
4927 // CHECK3-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
4928 // CHECK3-NEXT:    store i8* null, i8** [[TMP13]], align 4
4929 // CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
4930 // CHECK3-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
4931 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[TMP15]], align 4
4932 // CHECK3-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
4933 // CHECK3-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
4934 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[TMP17]], align 4
4935 // CHECK3-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
4936 // CHECK3-NEXT:    store i8* null, i8** [[TMP18]], align 4
4937 // CHECK3-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
4938 // CHECK3-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
4939 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[TMP20]], align 4
4940 // CHECK3-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
4941 // CHECK3-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32*
4942 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[TMP22]], align 4
4943 // CHECK3-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
4944 // CHECK3-NEXT:    store i8* null, i8** [[TMP23]], align 4
4945 // CHECK3-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
4946 // CHECK3-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
4947 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[TMP25]], align 4
4948 // CHECK3-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
4949 // CHECK3-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
4950 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[TMP27]], align 4
4951 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
4952 // CHECK3-NEXT:    store i8* null, i8** [[TMP28]], align 4
4953 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
4954 // CHECK3-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [10 x i32]**
4955 // CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP30]], align 4
4956 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
4957 // CHECK3-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to [10 x i32]**
4958 // CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP32]], align 4
4959 // CHECK3-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
4960 // CHECK3-NEXT:    store i8* null, i8** [[TMP33]], align 4
4961 // CHECK3-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4962 // CHECK3-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4963 // CHECK3-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A]], align 4
4964 // CHECK3-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_]], align 4
4965 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[N_ADDR]], align 4
4966 // CHECK3-NEXT:    store i32 [[TMP37]], i32* [[DOTCAPTURE_EXPR_2]], align 4
4967 // CHECK3-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4968 // CHECK3-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4969 // CHECK3-NEXT:    [[SUB:%.*]] = sub i32 [[TMP38]], [[TMP39]]
4970 // CHECK3-NEXT:    [[SUB4:%.*]] = sub i32 [[SUB]], 1
4971 // CHECK3-NEXT:    [[ADD:%.*]] = add i32 [[SUB4]], 1
4972 // CHECK3-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
4973 // CHECK3-NEXT:    [[SUB5:%.*]] = sub i32 [[DIV]], 1
4974 // CHECK3-NEXT:    store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_3]], align 4
4975 // CHECK3-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
4976 // CHECK3-NEXT:    [[ADD6:%.*]] = add i32 [[TMP40]], 1
4977 // CHECK3-NEXT:    [[TMP41:%.*]] = zext i32 [[ADD6]] to i64
4978 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]])
4979 // CHECK3-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
4980 // CHECK3-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
4981 // CHECK3-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
4982 // CHECK3:       omp_offload.failed:
4983 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], i32 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
4984 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
4985 // CHECK3:       omp_offload.cont:
4986 // CHECK3-NEXT:    br label [[OMP_IF_END:%.*]]
4987 // CHECK3:       omp_if.else:
4988 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], i32 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
4989 // CHECK3-NEXT:    br label [[OMP_IF_END]]
4990 // CHECK3:       omp_if.end:
4991 // CHECK3-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
4992 // CHECK3-NEXT:    ret i32 [[TMP44]]
4993 //
4994 //
4995 // CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
4996 // CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
4997 // CHECK3-NEXT:  entry:
4998 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4999 // CHECK3-NEXT:    [[A:%.*]] = alloca i32, align 4
5000 // CHECK3-NEXT:    [[AA:%.*]] = alloca i16, align 2
5001 // CHECK3-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
5002 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
5003 // CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
5004 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
5005 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
5006 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
5007 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5008 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5009 // CHECK3-NEXT:    store i32 0, i32* [[A]], align 4
5010 // CHECK3-NEXT:    store i16 0, i16* [[AA]], align 2
5011 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
5012 // CHECK3-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
5013 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
5014 // CHECK3-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
5015 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
5016 // CHECK3-NEXT:    store i16 [[TMP2]], i16* [[CONV]], align 2
5017 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
5018 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
5019 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
5020 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
5021 // CHECK3:       omp_if.then:
5022 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
5023 // CHECK3-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
5024 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
5025 // CHECK3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
5026 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
5027 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
5028 // CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
5029 // CHECK3-NEXT:    store i8* null, i8** [[TMP9]], align 4
5030 // CHECK3-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
5031 // CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
5032 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[TMP11]], align 4
5033 // CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
5034 // CHECK3-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
5035 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
5036 // CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
5037 // CHECK3-NEXT:    store i8* null, i8** [[TMP14]], align 4
5038 // CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
5039 // CHECK3-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
5040 // CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
5041 // CHECK3-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
5042 // CHECK3-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
5043 // CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
5044 // CHECK3-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
5045 // CHECK3-NEXT:    store i8* null, i8** [[TMP19]], align 4
5046 // CHECK3-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
5047 // CHECK3-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
5048 // CHECK3-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
5049 // CHECK3-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
5050 // CHECK3-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
5051 // CHECK3-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
5052 // CHECK3:       omp_offload.failed:
5053 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
5054 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
5055 // CHECK3:       omp_offload.cont:
5056 // CHECK3-NEXT:    br label [[OMP_IF_END:%.*]]
5057 // CHECK3:       omp_if.else:
5058 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
5059 // CHECK3-NEXT:    br label [[OMP_IF_END]]
5060 // CHECK3:       omp_if.end:
5061 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
5062 // CHECK3-NEXT:    ret i32 [[TMP24]]
5063 //
5064 //
5065 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
5066 // CHECK3-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
5067 // CHECK3-NEXT:  entry:
5068 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
5069 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
5070 // CHECK3-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
5071 // CHECK3-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
5072 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
5073 // CHECK3-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
5074 // CHECK3-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
5075 // CHECK3-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
5076 // CHECK3-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
5077 // CHECK3-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
5078 // CHECK3-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
5079 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
5080 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
5081 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
5082 // CHECK3-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
5083 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
5084 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
5085 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
5086 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
5087 // CHECK3-NEXT:    ret void
5088 //
5089 //
5090 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11
5091 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
5092 // CHECK3-NEXT:  entry:
5093 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5094 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5095 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
5096 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
5097 // CHECK3-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
5098 // CHECK3-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
5099 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
5100 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5101 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5102 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5103 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5104 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5105 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5106 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5107 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5108 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5109 // CHECK3-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
5110 // CHECK3-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
5111 // CHECK3-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
5112 // CHECK3-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
5113 // CHECK3-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
5114 // CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
5115 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
5116 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
5117 // CHECK3-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
5118 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5119 // CHECK3-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
5120 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5121 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5122 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5123 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
5124 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5125 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5126 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
5127 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5128 // CHECK3:       cond.true:
5129 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5130 // CHECK3:       cond.false:
5131 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5132 // CHECK3-NEXT:    br label [[COND_END]]
5133 // CHECK3:       cond.end:
5134 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
5135 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5136 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5137 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
5138 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5139 // CHECK3:       omp.inner.for.cond:
5140 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
5141 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !39
5142 // CHECK3-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
5143 // CHECK3-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5144 // CHECK3:       omp.inner.for.body:
5145 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
5146 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
5147 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5148 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !39
5149 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !39
5150 // CHECK3-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
5151 // CHECK3-NEXT:    [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00
5152 // CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
5153 // CHECK3-NEXT:    store double [[ADD4]], double* [[A]], align 4, !llvm.access.group !39
5154 // CHECK3-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
5155 // CHECK3-NEXT:    [[TMP13:%.*]] = load double, double* [[A5]], align 4, !llvm.access.group !39
5156 // CHECK3-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
5157 // CHECK3-NEXT:    store double [[INC]], double* [[A5]], align 4, !llvm.access.group !39
5158 // CHECK3-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
5159 // CHECK3-NEXT:    [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
5160 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
5161 // CHECK3-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
5162 // CHECK3-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !39
5163 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5164 // CHECK3:       omp.body.continue:
5165 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5166 // CHECK3:       omp.inner.for.inc:
5167 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
5168 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1
5169 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
5170 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
5171 // CHECK3:       omp.inner.for.end:
5172 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5173 // CHECK3:       omp.loop.exit:
5174 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
5175 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5176 // CHECK3-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
5177 // CHECK3-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5178 // CHECK3:       .omp.final.then:
5179 // CHECK3-NEXT:    store i32 10, i32* [[I]], align 4
5180 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5181 // CHECK3:       .omp.final.done:
5182 // CHECK3-NEXT:    ret void
5183 //
5184 //
5185 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
5186 // CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
5187 // CHECK3-NEXT:  entry:
5188 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5189 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5190 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
5191 // CHECK3-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
5192 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
5193 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
5194 // CHECK3-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
5195 // CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
5196 // CHECK3-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
5197 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5198 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5199 // CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
5200 // CHECK3-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
5201 // CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
5202 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
5203 // CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
5204 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
5205 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
5206 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
5207 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
5208 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
5209 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
5210 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
5211 // CHECK3-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV]], align 2
5212 // CHECK3-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
5213 // CHECK3-NEXT:    store i16 [[TMP5]], i16* [[CONV2]], align 2
5214 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AA_CASTED]], align 4
5215 // CHECK3-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV1]], align 1
5216 // CHECK3-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
5217 // CHECK3-NEXT:    store i8 [[TMP7]], i8* [[CONV3]], align 1
5218 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
5219 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]])
5220 // CHECK3-NEXT:    ret void
5221 //
5222 //
5223 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..13
5224 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
5225 // CHECK3-NEXT:  entry:
5226 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5227 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5228 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5229 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5230 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
5231 // CHECK3-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
5232 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
5233 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5234 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5235 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5236 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
5237 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
5238 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5239 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5240 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5241 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5242 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5243 // CHECK3-NEXT:    [[I6:%.*]] = alloca i32, align 4
5244 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5245 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5246 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5247 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5248 // CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
5249 // CHECK3-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
5250 // CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
5251 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
5252 // CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
5253 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
5254 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
5255 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
5256 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
5257 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
5258 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5259 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5260 // CHECK3-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
5261 // CHECK3-NEXT:    [[SUB4:%.*]] = sub i32 [[SUB]], 1
5262 // CHECK3-NEXT:    [[ADD:%.*]] = add i32 [[SUB4]], 1
5263 // CHECK3-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
5264 // CHECK3-NEXT:    [[SUB5:%.*]] = sub i32 [[DIV]], 1
5265 // CHECK3-NEXT:    store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_3]], align 4
5266 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5267 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
5268 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5269 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5270 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
5271 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5272 // CHECK3:       omp.precond.then:
5273 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5274 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
5275 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
5276 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5277 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5278 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5279 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
5280 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5281 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5282 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
5283 // CHECK3-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
5284 // CHECK3-NEXT:    br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5285 // CHECK3:       cond.true:
5286 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
5287 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5288 // CHECK3:       cond.false:
5289 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5290 // CHECK3-NEXT:    br label [[COND_END]]
5291 // CHECK3:       cond.end:
5292 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
5293 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5294 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5295 // CHECK3-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
5296 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5297 // CHECK3:       omp.inner.for.cond:
5298 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
5299 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !42
5300 // CHECK3-NEXT:    [[ADD8:%.*]] = add i32 [[TMP17]], 1
5301 // CHECK3-NEXT:    [[CMP9:%.*]] = icmp ult i32 [[TMP16]], [[ADD8]]
5302 // CHECK3-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5303 // CHECK3:       omp.inner.for.body:
5304 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !42
5305 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
5306 // CHECK3-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
5307 // CHECK3-NEXT:    [[ADD10:%.*]] = add i32 [[TMP18]], [[MUL]]
5308 // CHECK3-NEXT:    store i32 [[ADD10]], i32* [[I6]], align 4, !llvm.access.group !42
5309 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !42
5310 // CHECK3-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP20]], 1
5311 // CHECK3-NEXT:    store i32 [[ADD11]], i32* [[A_ADDR]], align 4, !llvm.access.group !42
5312 // CHECK3-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !42
5313 // CHECK3-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP21]] to i32
5314 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
5315 // CHECK3-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
5316 // CHECK3-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !42
5317 // CHECK3-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV1]], align 1, !llvm.access.group !42
5318 // CHECK3-NEXT:    [[CONV15:%.*]] = sext i8 [[TMP22]] to i32
5319 // CHECK3-NEXT:    [[ADD16:%.*]] = add nsw i32 [[CONV15]], 1
5320 // CHECK3-NEXT:    [[CONV17:%.*]] = trunc i32 [[ADD16]] to i8
5321 // CHECK3-NEXT:    store i8 [[CONV17]], i8* [[CONV1]], align 1, !llvm.access.group !42
5322 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
5323 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !42
5324 // CHECK3-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP23]], 1
5325 // CHECK3-NEXT:    store i32 [[ADD18]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !42
5326 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5327 // CHECK3:       omp.body.continue:
5328 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5329 // CHECK3:       omp.inner.for.inc:
5330 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
5331 // CHECK3-NEXT:    [[ADD19:%.*]] = add i32 [[TMP24]], 1
5332 // CHECK3-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
5333 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
5334 // CHECK3:       omp.inner.for.end:
5335 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5336 // CHECK3:       omp.loop.exit:
5337 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5338 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
5339 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
5340 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5341 // CHECK3-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
5342 // CHECK3-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5343 // CHECK3:       .omp.final.then:
5344 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5345 // CHECK3-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5346 // CHECK3-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5347 // CHECK3-NEXT:    [[SUB20:%.*]] = sub i32 [[TMP30]], [[TMP31]]
5348 // CHECK3-NEXT:    [[SUB21:%.*]] = sub i32 [[SUB20]], 1
5349 // CHECK3-NEXT:    [[ADD22:%.*]] = add i32 [[SUB21]], 1
5350 // CHECK3-NEXT:    [[DIV23:%.*]] = udiv i32 [[ADD22]], 1
5351 // CHECK3-NEXT:    [[MUL24:%.*]] = mul i32 [[DIV23]], 1
5352 // CHECK3-NEXT:    [[ADD25:%.*]] = add i32 [[TMP29]], [[MUL24]]
5353 // CHECK3-NEXT:    store i32 [[ADD25]], i32* [[I6]], align 4
5354 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5355 // CHECK3:       .omp.final.done:
5356 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
5357 // CHECK3:       omp.precond.end:
5358 // CHECK3-NEXT:    ret void
5359 //
5360 //
5361 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
5362 // CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
5363 // CHECK3-NEXT:  entry:
5364 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5365 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
5366 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
5367 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
5368 // CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
5369 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5370 // CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
5371 // CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
5372 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
5373 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
5374 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
5375 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
5376 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
5377 // CHECK3-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
5378 // CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
5379 // CHECK3-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
5380 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
5381 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
5382 // CHECK3-NEXT:    ret void
5383 //
5384 //
5385 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..16
5386 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
5387 // CHECK3-NEXT:  entry:
5388 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5389 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5390 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5391 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
5392 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
5393 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5394 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5395 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5396 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5397 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5398 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5399 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
5400 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5401 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5402 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5403 // CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
5404 // CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
5405 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
5406 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
5407 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5408 // CHECK3-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
5409 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5410 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5411 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5412 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
5413 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5414 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5415 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
5416 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5417 // CHECK3:       cond.true:
5418 // CHECK3-NEXT:    br label [[COND_END:%.*]]
5419 // CHECK3:       cond.false:
5420 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5421 // CHECK3-NEXT:    br label [[COND_END]]
5422 // CHECK3:       cond.end:
5423 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
5424 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5425 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5426 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
5427 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5428 // CHECK3:       omp.inner.for.cond:
5429 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
5430 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !45
5431 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
5432 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5433 // CHECK3:       omp.inner.for.body:
5434 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
5435 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
5436 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5437 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !45
5438 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !45
5439 // CHECK3-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
5440 // CHECK3-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !45
5441 // CHECK3-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !45
5442 // CHECK3-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
5443 // CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
5444 // CHECK3-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
5445 // CHECK3-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !45
5446 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
5447 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !45
5448 // CHECK3-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
5449 // CHECK3-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !45
5450 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5451 // CHECK3:       omp.body.continue:
5452 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5453 // CHECK3:       omp.inner.for.inc:
5454 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
5455 // CHECK3-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP12]], 1
5456 // CHECK3-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
5457 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
5458 // CHECK3:       omp.inner.for.end:
5459 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5460 // CHECK3:       omp.loop.exit:
5461 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
5462 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5463 // CHECK3-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
5464 // CHECK3-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5465 // CHECK3:       .omp.final.then:
5466 // CHECK3-NEXT:    store i32 10, i32* [[I]], align 4
5467 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5468 // CHECK3:       .omp.final.done:
5469 // CHECK3-NEXT:    ret void
5470 //
5471 //
5472 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
5473 // CHECK3-SAME: () #[[ATTR5]] {
5474 // CHECK3-NEXT:  entry:
5475 // CHECK3-NEXT:    call void @__tgt_register_requires(i64 1)
5476 // CHECK3-NEXT:    ret void
5477 //
5478 //
5479 // CHECK4-LABEL: define {{[^@]+}}@_Z3fooi
5480 // CHECK4-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
5481 // CHECK4-NEXT:  entry:
5482 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
5483 // CHECK4-NEXT:    [[A:%.*]] = alloca i32, align 4
5484 // CHECK4-NEXT:    [[AA:%.*]] = alloca i16, align 2
5485 // CHECK4-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
5486 // CHECK4-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
5487 // CHECK4-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
5488 // CHECK4-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
5489 // CHECK4-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
5490 // CHECK4-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
5491 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5492 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
5493 // CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
5494 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
5495 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__CASTED3:%.*]] = alloca i32, align 4
5496 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
5497 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
5498 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
5499 // CHECK4-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
5500 // CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
5501 // CHECK4-NEXT:    [[AA_CASTED4:%.*]] = alloca i32, align 4
5502 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [1 x i8*], align 4
5503 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS7:%.*]] = alloca [1 x i8*], align 4
5504 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [1 x i8*], align 4
5505 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5506 // CHECK4-NEXT:    [[A_CASTED9:%.*]] = alloca i32, align 4
5507 // CHECK4-NEXT:    [[AA_CASTED10:%.*]] = alloca i32, align 4
5508 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS12:%.*]] = alloca [2 x i8*], align 4
5509 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS13:%.*]] = alloca [2 x i8*], align 4
5510 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS14:%.*]] = alloca [2 x i8*], align 4
5511 // CHECK4-NEXT:    [[_TMP15:%.*]] = alloca i32, align 4
5512 // CHECK4-NEXT:    [[A_CASTED18:%.*]] = alloca i32, align 4
5513 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [9 x i8*], align 4
5514 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS22:%.*]] = alloca [9 x i8*], align 4
5515 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [9 x i8*], align 4
5516 // CHECK4-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4
5517 // CHECK4-NEXT:    [[_TMP24:%.*]] = alloca i32, align 4
5518 // CHECK4-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
5519 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
5520 // CHECK4-NEXT:    store i32 0, i32* [[A]], align 4
5521 // CHECK4-NEXT:    store i16 0, i16* [[AA]], align 2
5522 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
5523 // CHECK4-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
5524 // CHECK4-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
5525 // CHECK4-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
5526 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
5527 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
5528 // CHECK4-NEXT:    [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
5529 // CHECK4-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
5530 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
5531 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
5532 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
5533 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[A]], align 4
5534 // CHECK4-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_2]], align 4
5535 // CHECK4-NEXT:    [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
5536 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
5537 // CHECK4-NEXT:    store i16 [[TMP7]], i16* [[CONV]], align 2
5538 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[AA_CASTED]], align 4
5539 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5540 // CHECK4-NEXT:    store i32 [[TMP9]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
5541 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
5542 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5543 // CHECK4-NEXT:    store i32 [[TMP11]], i32* [[DOTCAPTURE_EXPR__CASTED3]], align 4
5544 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED3]], align 4
5545 // CHECK4-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
5546 // CHECK4-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32*
5547 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[TMP14]], align 4
5548 // CHECK4-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
5549 // CHECK4-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32*
5550 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[TMP16]], align 4
5551 // CHECK4-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
5552 // CHECK4-NEXT:    store i8* null, i8** [[TMP17]], align 4
5553 // CHECK4-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
5554 // CHECK4-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
5555 // CHECK4-NEXT:    store i32 [[TMP10]], i32* [[TMP19]], align 4
5556 // CHECK4-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
5557 // CHECK4-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32*
5558 // CHECK4-NEXT:    store i32 [[TMP10]], i32* [[TMP21]], align 4
5559 // CHECK4-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
5560 // CHECK4-NEXT:    store i8* null, i8** [[TMP22]], align 4
5561 // CHECK4-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
5562 // CHECK4-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32*
5563 // CHECK4-NEXT:    store i32 [[TMP12]], i32* [[TMP24]], align 4
5564 // CHECK4-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
5565 // CHECK4-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32*
5566 // CHECK4-NEXT:    store i32 [[TMP12]], i32* [[TMP26]], align 4
5567 // CHECK4-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
5568 // CHECK4-NEXT:    store i8* null, i8** [[TMP27]], align 4
5569 // CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
5570 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
5571 // CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
5572 // CHECK4-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2
5573 // CHECK4-NEXT:    store i16 [[TMP31]], i16* [[TMP30]], align 4
5574 // CHECK4-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
5575 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5576 // CHECK4-NEXT:    store i32 [[TMP33]], i32* [[TMP32]], align 4
5577 // CHECK4-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
5578 // CHECK4-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
5579 // CHECK4-NEXT:    store i32 [[TMP35]], i32* [[TMP34]], align 4
5580 // CHECK4-NEXT:    [[TMP36:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 72, i32 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
5581 // CHECK4-NEXT:    [[TMP37:%.*]] = bitcast i8* [[TMP36]] to %struct.kmp_task_t_with_privates*
5582 // CHECK4-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP37]], i32 0, i32 0
5583 // CHECK4-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP38]], i32 0, i32 0
5584 // CHECK4-NEXT:    [[TMP40:%.*]] = load i8*, i8** [[TMP39]], align 4
5585 // CHECK4-NEXT:    [[TMP41:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
5586 // CHECK4-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP40]], i8* align 4 [[TMP41]], i32 12, i1 false)
5587 // CHECK4-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP37]], i32 0, i32 1
5588 // CHECK4-NEXT:    [[TMP43:%.*]] = bitcast i8* [[TMP40]] to %struct.anon*
5589 // CHECK4-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 0
5590 // CHECK4-NEXT:    [[TMP45:%.*]] = bitcast [3 x i64]* [[TMP44]] to i8*
5591 // CHECK4-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP45]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes to i8*), i32 24, i1 false)
5592 // CHECK4-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 1
5593 // CHECK4-NEXT:    [[TMP47:%.*]] = bitcast [3 x i8*]* [[TMP46]] to i8*
5594 // CHECK4-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP28]] to i8*
5595 // CHECK4-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP47]], i8* align 4 [[TMP48]], i32 12, i1 false)
5596 // CHECK4-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 2
5597 // CHECK4-NEXT:    [[TMP50:%.*]] = bitcast [3 x i8*]* [[TMP49]] to i8*
5598 // CHECK4-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP29]] to i8*
5599 // CHECK4-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP50]], i8* align 4 [[TMP51]], i32 12, i1 false)
5600 // CHECK4-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 3
5601 // CHECK4-NEXT:    [[TMP53:%.*]] = load i16, i16* [[AA]], align 2
5602 // CHECK4-NEXT:    store i16 [[TMP53]], i16* [[TMP52]], align 4
5603 // CHECK4-NEXT:    [[TMP54:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP36]])
5604 // CHECK4-NEXT:    [[TMP55:%.*]] = load i32, i32* [[A]], align 4
5605 // CHECK4-NEXT:    store i32 [[TMP55]], i32* [[A_CASTED]], align 4
5606 // CHECK4-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A_CASTED]], align 4
5607 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i32 [[TMP56]]) #[[ATTR4:[0-9]+]]
5608 // CHECK4-NEXT:    [[TMP57:%.*]] = load i16, i16* [[AA]], align 2
5609 // CHECK4-NEXT:    [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
5610 // CHECK4-NEXT:    store i16 [[TMP57]], i16* [[CONV5]], align 2
5611 // CHECK4-NEXT:    [[TMP58:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
5612 // CHECK4-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
5613 // CHECK4-NEXT:    [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i32*
5614 // CHECK4-NEXT:    store i32 [[TMP58]], i32* [[TMP60]], align 4
5615 // CHECK4-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
5616 // CHECK4-NEXT:    [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32*
5617 // CHECK4-NEXT:    store i32 [[TMP58]], i32* [[TMP62]], align 4
5618 // CHECK4-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
5619 // CHECK4-NEXT:    store i8* null, i8** [[TMP63]], align 4
5620 // CHECK4-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
5621 // CHECK4-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
5622 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
5623 // CHECK4-NEXT:    [[TMP66:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111.region_id, i32 1, i8** [[TMP64]], i8** [[TMP65]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
5624 // CHECK4-NEXT:    [[TMP67:%.*]] = icmp ne i32 [[TMP66]], 0
5625 // CHECK4-NEXT:    br i1 [[TMP67]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
5626 // CHECK4:       omp_offload.failed:
5627 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111(i32 [[TMP58]]) #[[ATTR4]]
5628 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
5629 // CHECK4:       omp_offload.cont:
5630 // CHECK4-NEXT:    [[TMP68:%.*]] = load i32, i32* [[A]], align 4
5631 // CHECK4-NEXT:    store i32 [[TMP68]], i32* [[A_CASTED9]], align 4
5632 // CHECK4-NEXT:    [[TMP69:%.*]] = load i32, i32* [[A_CASTED9]], align 4
5633 // CHECK4-NEXT:    [[TMP70:%.*]] = load i16, i16* [[AA]], align 2
5634 // CHECK4-NEXT:    [[CONV11:%.*]] = bitcast i32* [[AA_CASTED10]] to i16*
5635 // CHECK4-NEXT:    store i16 [[TMP70]], i16* [[CONV11]], align 2
5636 // CHECK4-NEXT:    [[TMP71:%.*]] = load i32, i32* [[AA_CASTED10]], align 4
5637 // CHECK4-NEXT:    [[TMP72:%.*]] = load i32, i32* [[N_ADDR]], align 4
5638 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP72]], 10
5639 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
5640 // CHECK4:       omp_if.then:
5641 // CHECK4-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 0
5642 // CHECK4-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
5643 // CHECK4-NEXT:    store i32 [[TMP69]], i32* [[TMP74]], align 4
5644 // CHECK4-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 0
5645 // CHECK4-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32*
5646 // CHECK4-NEXT:    store i32 [[TMP69]], i32* [[TMP76]], align 4
5647 // CHECK4-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS14]], i32 0, i32 0
5648 // CHECK4-NEXT:    store i8* null, i8** [[TMP77]], align 4
5649 // CHECK4-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 1
5650 // CHECK4-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
5651 // CHECK4-NEXT:    store i32 [[TMP71]], i32* [[TMP79]], align 4
5652 // CHECK4-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 1
5653 // CHECK4-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
5654 // CHECK4-NEXT:    store i32 [[TMP71]], i32* [[TMP81]], align 4
5655 // CHECK4-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS14]], i32 0, i32 1
5656 // CHECK4-NEXT:    store i8* null, i8** [[TMP82]], align 4
5657 // CHECK4-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 0
5658 // CHECK4-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 0
5659 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
5660 // CHECK4-NEXT:    [[TMP85:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118.region_id, i32 2, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.7, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
5661 // CHECK4-NEXT:    [[TMP86:%.*]] = icmp ne i32 [[TMP85]], 0
5662 // CHECK4-NEXT:    br i1 [[TMP86]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
5663 // CHECK4:       omp_offload.failed16:
5664 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i32 [[TMP69]], i32 [[TMP71]]) #[[ATTR4]]
5665 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT17]]
5666 // CHECK4:       omp_offload.cont17:
5667 // CHECK4-NEXT:    br label [[OMP_IF_END:%.*]]
5668 // CHECK4:       omp_if.else:
5669 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i32 [[TMP69]], i32 [[TMP71]]) #[[ATTR4]]
5670 // CHECK4-NEXT:    br label [[OMP_IF_END]]
5671 // CHECK4:       omp_if.end:
5672 // CHECK4-NEXT:    [[TMP87:%.*]] = load i32, i32* [[A]], align 4
5673 // CHECK4-NEXT:    store i32 [[TMP87]], i32* [[A_CASTED18]], align 4
5674 // CHECK4-NEXT:    [[TMP88:%.*]] = load i32, i32* [[A_CASTED18]], align 4
5675 // CHECK4-NEXT:    [[TMP89:%.*]] = load i32, i32* [[N_ADDR]], align 4
5676 // CHECK4-NEXT:    [[CMP19:%.*]] = icmp sgt i32 [[TMP89]], 20
5677 // CHECK4-NEXT:    br i1 [[CMP19]], label [[OMP_IF_THEN20:%.*]], label [[OMP_IF_ELSE27:%.*]]
5678 // CHECK4:       omp_if.then20:
5679 // CHECK4-NEXT:    [[TMP90:%.*]] = mul nuw i32 [[TMP1]], 4
5680 // CHECK4-NEXT:    [[TMP91:%.*]] = sext i32 [[TMP90]] to i64
5681 // CHECK4-NEXT:    [[TMP92:%.*]] = mul nuw i32 5, [[TMP3]]
5682 // CHECK4-NEXT:    [[TMP93:%.*]] = mul nuw i32 [[TMP92]], 8
5683 // CHECK4-NEXT:    [[TMP94:%.*]] = sext i32 [[TMP93]] to i64
5684 // CHECK4-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
5685 // CHECK4-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32*
5686 // CHECK4-NEXT:    store i32 [[TMP88]], i32* [[TMP96]], align 4
5687 // CHECK4-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
5688 // CHECK4-NEXT:    [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32*
5689 // CHECK4-NEXT:    store i32 [[TMP88]], i32* [[TMP98]], align 4
5690 // CHECK4-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
5691 // CHECK4-NEXT:    store i64 4, i64* [[TMP99]], align 4
5692 // CHECK4-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0
5693 // CHECK4-NEXT:    store i8* null, i8** [[TMP100]], align 4
5694 // CHECK4-NEXT:    [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
5695 // CHECK4-NEXT:    [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]**
5696 // CHECK4-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 4
5697 // CHECK4-NEXT:    [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
5698 // CHECK4-NEXT:    [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]**
5699 // CHECK4-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4
5700 // CHECK4-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
5701 // CHECK4-NEXT:    store i64 40, i64* [[TMP105]], align 4
5702 // CHECK4-NEXT:    [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1
5703 // CHECK4-NEXT:    store i8* null, i8** [[TMP106]], align 4
5704 // CHECK4-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2
5705 // CHECK4-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32*
5706 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP108]], align 4
5707 // CHECK4-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2
5708 // CHECK4-NEXT:    [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32*
5709 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP110]], align 4
5710 // CHECK4-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
5711 // CHECK4-NEXT:    store i64 4, i64* [[TMP111]], align 4
5712 // CHECK4-NEXT:    [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 2
5713 // CHECK4-NEXT:    store i8* null, i8** [[TMP112]], align 4
5714 // CHECK4-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3
5715 // CHECK4-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float**
5716 // CHECK4-NEXT:    store float* [[VLA]], float** [[TMP114]], align 4
5717 // CHECK4-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3
5718 // CHECK4-NEXT:    [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float**
5719 // CHECK4-NEXT:    store float* [[VLA]], float** [[TMP116]], align 4
5720 // CHECK4-NEXT:    [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
5721 // CHECK4-NEXT:    store i64 [[TMP91]], i64* [[TMP117]], align 4
5722 // CHECK4-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 3
5723 // CHECK4-NEXT:    store i8* null, i8** [[TMP118]], align 4
5724 // CHECK4-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4
5725 // CHECK4-NEXT:    [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]**
5726 // CHECK4-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 4
5727 // CHECK4-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4
5728 // CHECK4-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]**
5729 // CHECK4-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 4
5730 // CHECK4-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
5731 // CHECK4-NEXT:    store i64 400, i64* [[TMP123]], align 4
5732 // CHECK4-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 4
5733 // CHECK4-NEXT:    store i8* null, i8** [[TMP124]], align 4
5734 // CHECK4-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 5
5735 // CHECK4-NEXT:    [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32*
5736 // CHECK4-NEXT:    store i32 5, i32* [[TMP126]], align 4
5737 // CHECK4-NEXT:    [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 5
5738 // CHECK4-NEXT:    [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32*
5739 // CHECK4-NEXT:    store i32 5, i32* [[TMP128]], align 4
5740 // CHECK4-NEXT:    [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
5741 // CHECK4-NEXT:    store i64 4, i64* [[TMP129]], align 4
5742 // CHECK4-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 5
5743 // CHECK4-NEXT:    store i8* null, i8** [[TMP130]], align 4
5744 // CHECK4-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 6
5745 // CHECK4-NEXT:    [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32*
5746 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[TMP132]], align 4
5747 // CHECK4-NEXT:    [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 6
5748 // CHECK4-NEXT:    [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32*
5749 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[TMP134]], align 4
5750 // CHECK4-NEXT:    [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
5751 // CHECK4-NEXT:    store i64 4, i64* [[TMP135]], align 4
5752 // CHECK4-NEXT:    [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 6
5753 // CHECK4-NEXT:    store i8* null, i8** [[TMP136]], align 4
5754 // CHECK4-NEXT:    [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 7
5755 // CHECK4-NEXT:    [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double**
5756 // CHECK4-NEXT:    store double* [[VLA1]], double** [[TMP138]], align 4
5757 // CHECK4-NEXT:    [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 7
5758 // CHECK4-NEXT:    [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double**
5759 // CHECK4-NEXT:    store double* [[VLA1]], double** [[TMP140]], align 4
5760 // CHECK4-NEXT:    [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
5761 // CHECK4-NEXT:    store i64 [[TMP94]], i64* [[TMP141]], align 4
5762 // CHECK4-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 7
5763 // CHECK4-NEXT:    store i8* null, i8** [[TMP142]], align 4
5764 // CHECK4-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 8
5765 // CHECK4-NEXT:    [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT**
5766 // CHECK4-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 4
5767 // CHECK4-NEXT:    [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 8
5768 // CHECK4-NEXT:    [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT**
5769 // CHECK4-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 4
5770 // CHECK4-NEXT:    [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
5771 // CHECK4-NEXT:    store i64 12, i64* [[TMP147]], align 4
5772 // CHECK4-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 8
5773 // CHECK4-NEXT:    store i8* null, i8** [[TMP148]], align 4
5774 // CHECK4-NEXT:    [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
5775 // CHECK4-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
5776 // CHECK4-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
5777 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
5778 // CHECK4-NEXT:    [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
5779 // CHECK4-NEXT:    [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0
5780 // CHECK4-NEXT:    br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]]
5781 // CHECK4:       omp_offload.failed25:
5782 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
5783 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT26]]
5784 // CHECK4:       omp_offload.cont26:
5785 // CHECK4-NEXT:    br label [[OMP_IF_END28:%.*]]
5786 // CHECK4:       omp_if.else27:
5787 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
5788 // CHECK4-NEXT:    br label [[OMP_IF_END28]]
5789 // CHECK4:       omp_if.end28:
5790 // CHECK4-NEXT:    [[TMP154:%.*]] = load i32, i32* [[A]], align 4
5791 // CHECK4-NEXT:    [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
5792 // CHECK4-NEXT:    call void @llvm.stackrestore(i8* [[TMP155]])
5793 // CHECK4-NEXT:    ret i32 [[TMP154]]
5794 //
5795 //
5796 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
5797 // CHECK4-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], i32 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2:[0-9]+]] {
5798 // CHECK4-NEXT:  entry:
5799 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
5800 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
5801 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4
5802 // CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
5803 // CHECK4-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
5804 // CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
5805 // CHECK4-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
5806 // CHECK4-NEXT:    store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
5807 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
5808 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
5809 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
5810 // CHECK4-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
5811 // CHECK4-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
5812 // CHECK4-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
5813 // CHECK4-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
5814 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
5815 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP4]])
5816 // CHECK4-NEXT:    ret void
5817 //
5818 //
5819 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
5820 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3:[0-9]+]] {
5821 // CHECK4-NEXT:  entry:
5822 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5823 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5824 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
5825 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5826 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5827 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5828 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5829 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5830 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5831 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
5832 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5833 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5834 // CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
5835 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
5836 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5837 // CHECK4-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
5838 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5839 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5840 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5841 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
5842 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5843 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5844 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
5845 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5846 // CHECK4:       cond.true:
5847 // CHECK4-NEXT:    br label [[COND_END:%.*]]
5848 // CHECK4:       cond.false:
5849 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5850 // CHECK4-NEXT:    br label [[COND_END]]
5851 // CHECK4:       cond.end:
5852 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
5853 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5854 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5855 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
5856 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5857 // CHECK4:       omp.inner.for.cond:
5858 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
5859 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
5860 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
5861 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5862 // CHECK4:       omp.inner.for.body:
5863 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
5864 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
5865 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5866 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
5867 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5868 // CHECK4:       omp.body.continue:
5869 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5870 // CHECK4:       omp.inner.for.inc:
5871 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
5872 // CHECK4-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
5873 // CHECK4-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
5874 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
5875 // CHECK4:       omp.inner.for.end:
5876 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5877 // CHECK4:       omp.loop.exit:
5878 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
5879 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5880 // CHECK4-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
5881 // CHECK4-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5882 // CHECK4:       .omp.final.then:
5883 // CHECK4-NEXT:    store i32 10, i32* [[I]], align 4
5884 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5885 // CHECK4:       .omp.final.done:
5886 // CHECK4-NEXT:    ret void
5887 //
5888 //
5889 // CHECK4-LABEL: define {{[^@]+}}@.omp_task_privates_map.
5890 // CHECK4-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], i16** noalias noundef [[TMP1:%.*]], [3 x i8*]** noalias noundef [[TMP2:%.*]], [3 x i8*]** noalias noundef [[TMP3:%.*]], [3 x i64]** noalias noundef [[TMP4:%.*]]) #[[ATTR5:[0-9]+]] {
5891 // CHECK4-NEXT:  entry:
5892 // CHECK4-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 4
5893 // CHECK4-NEXT:    [[DOTADDR1:%.*]] = alloca i16**, align 4
5894 // CHECK4-NEXT:    [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 4
5895 // CHECK4-NEXT:    [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 4
5896 // CHECK4-NEXT:    [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 4
5897 // CHECK4-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 4
5898 // CHECK4-NEXT:    store i16** [[TMP1]], i16*** [[DOTADDR1]], align 4
5899 // CHECK4-NEXT:    store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 4
5900 // CHECK4-NEXT:    store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 4
5901 // CHECK4-NEXT:    store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 4
5902 // CHECK4-NEXT:    [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 4
5903 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0
5904 // CHECK4-NEXT:    [[TMP7:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 4
5905 // CHECK4-NEXT:    store [3 x i64]* [[TMP6]], [3 x i64]** [[TMP7]], align 4
5906 // CHECK4-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1
5907 // CHECK4-NEXT:    [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 4
5908 // CHECK4-NEXT:    store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 4
5909 // CHECK4-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2
5910 // CHECK4-NEXT:    [[TMP11:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 4
5911 // CHECK4-NEXT:    store [3 x i8*]* [[TMP10]], [3 x i8*]** [[TMP11]], align 4
5912 // CHECK4-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3
5913 // CHECK4-NEXT:    [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 4
5914 // CHECK4-NEXT:    store i16* [[TMP12]], i16** [[TMP13]], align 4
5915 // CHECK4-NEXT:    ret void
5916 //
5917 //
5918 // CHECK4-LABEL: define {{[^@]+}}@.omp_task_entry.
5919 // CHECK4-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR6:[0-9]+]] {
5920 // CHECK4-NEXT:  entry:
5921 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
5922 // CHECK4-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
5923 // CHECK4-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
5924 // CHECK4-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
5925 // CHECK4-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
5926 // CHECK4-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
5927 // CHECK4-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 4
5928 // CHECK4-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 4
5929 // CHECK4-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 4
5930 // CHECK4-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 4
5931 // CHECK4-NEXT:    [[AA_CASTED_I:%.*]] = alloca i32, align 4
5932 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__CASTED_I:%.*]] = alloca i32, align 4
5933 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__CASTED4_I:%.*]] = alloca i32, align 4
5934 // CHECK4-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
5935 // CHECK4-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
5936 // CHECK4-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
5937 // CHECK4-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
5938 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
5939 // CHECK4-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
5940 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
5941 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
5942 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
5943 // CHECK4-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
5944 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
5945 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
5946 // CHECK4-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
5947 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
5948 // CHECK4-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]])
5949 // CHECK4-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
5950 // CHECK4-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
5951 // CHECK4-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]])
5952 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !27
5953 // CHECK4-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !27
5954 // CHECK4-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27
5955 // CHECK4-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27
5956 // CHECK4-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !27
5957 // CHECK4-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !27
5958 // CHECK4-NEXT:    [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !27
5959 // CHECK4-NEXT:    [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27
5960 // CHECK4-NEXT:    [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27
5961 // CHECK4-NEXT:    [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)*
5962 // CHECK4-NEXT:    call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
5963 // CHECK4-NEXT:    [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !27
5964 // CHECK4-NEXT:    [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !27
5965 // CHECK4-NEXT:    [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !27
5966 // CHECK4-NEXT:    [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !27
5967 // CHECK4-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i32 0, i32 0
5968 // CHECK4-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i32 0, i32 0
5969 // CHECK4-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i32 0, i32 0
5970 // CHECK4-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
5971 // CHECK4-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
5972 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[TMP23]], align 4
5973 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) #[[ATTR4]]
5974 // CHECK4-NEXT:    [[TMP26:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 [[TMP25]], i32 1, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
5975 // CHECK4-NEXT:    [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
5976 // CHECK4-NEXT:    br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
5977 // CHECK4:       omp_offload.failed.i:
5978 // CHECK4-NEXT:    [[TMP28:%.*]] = load i16, i16* [[TMP16]], align 2
5979 // CHECK4-NEXT:    [[CONV_I:%.*]] = bitcast i32* [[AA_CASTED_I]] to i16*
5980 // CHECK4-NEXT:    store i16 [[TMP28]], i16* [[CONV_I]], align 2, !noalias !27
5981 // CHECK4-NEXT:    [[TMP29:%.*]] = load i32, i32* [[AA_CASTED_I]], align 4, !noalias !27
5982 // CHECK4-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP23]], align 4
5983 // CHECK4-NEXT:    store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !27
5984 // CHECK4-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !27
5985 // CHECK4-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP24]], align 4
5986 // CHECK4-NEXT:    store i32 [[TMP32]], i32* [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !27
5987 // CHECK4-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !27
5988 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97(i32 [[TMP29]], i32 [[TMP31]], i32 [[TMP33]]) #[[ATTR4]]
5989 // CHECK4-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
5990 // CHECK4:       .omp_outlined..1.exit:
5991 // CHECK4-NEXT:    ret i32 0
5992 //
5993 //
5994 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104
5995 // CHECK4-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
5996 // CHECK4-NEXT:  entry:
5997 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5998 // CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
5999 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
6000 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
6001 // CHECK4-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
6002 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
6003 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]])
6004 // CHECK4-NEXT:    ret void
6005 //
6006 //
6007 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..2
6008 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
6009 // CHECK4-NEXT:  entry:
6010 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6011 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6012 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
6013 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6014 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6015 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6016 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6017 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6018 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6019 // CHECK4-NEXT:    [[A1:%.*]] = alloca i32, align 4
6020 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6021 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6022 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
6023 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6024 // CHECK4-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
6025 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6026 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6027 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6028 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
6029 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6030 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6031 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
6032 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6033 // CHECK4:       cond.true:
6034 // CHECK4-NEXT:    br label [[COND_END:%.*]]
6035 // CHECK4:       cond.false:
6036 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6037 // CHECK4-NEXT:    br label [[COND_END]]
6038 // CHECK4:       cond.end:
6039 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
6040 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6041 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6042 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
6043 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6044 // CHECK4:       omp.inner.for.cond:
6045 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6046 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6047 // CHECK4-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
6048 // CHECK4-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6049 // CHECK4:       omp.inner.for.body:
6050 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6051 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
6052 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6053 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[A1]], align 4
6054 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A1]], align 4
6055 // CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
6056 // CHECK4-NEXT:    store i32 [[ADD3]], i32* [[A1]], align 4
6057 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6058 // CHECK4:       omp.body.continue:
6059 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6060 // CHECK4:       omp.inner.for.inc:
6061 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6062 // CHECK4-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1
6063 // CHECK4-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
6064 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
6065 // CHECK4:       omp.inner.for.end:
6066 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6067 // CHECK4:       omp.loop.exit:
6068 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
6069 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6070 // CHECK4-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
6071 // CHECK4-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6072 // CHECK4:       .omp.final.then:
6073 // CHECK4-NEXT:    store i32 10, i32* [[A_ADDR]], align 4
6074 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6075 // CHECK4:       .omp.final.done:
6076 // CHECK4-NEXT:    ret void
6077 //
6078 //
6079 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
6080 // CHECK4-SAME: (i32 noundef [[AA:%.*]]) #[[ATTR2]] {
6081 // CHECK4-NEXT:  entry:
6082 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
6083 // CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
6084 // CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
6085 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
6086 // CHECK4-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
6087 // CHECK4-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
6088 // CHECK4-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
6089 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
6090 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]])
6091 // CHECK4-NEXT:    ret void
6092 //
6093 //
6094 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3
6095 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3]] {
6096 // CHECK4-NEXT:  entry:
6097 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6098 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6099 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
6100 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6101 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6102 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6103 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6104 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6105 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6106 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
6107 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6108 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6109 // CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
6110 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
6111 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6112 // CHECK4-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
6113 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6114 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6115 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6116 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
6117 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6118 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6119 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
6120 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6121 // CHECK4:       cond.true:
6122 // CHECK4-NEXT:    br label [[COND_END:%.*]]
6123 // CHECK4:       cond.false:
6124 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6125 // CHECK4-NEXT:    br label [[COND_END]]
6126 // CHECK4:       cond.end:
6127 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
6128 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6129 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6130 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
6131 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6132 // CHECK4:       omp.inner.for.cond:
6133 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
6134 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !30
6135 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
6136 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6137 // CHECK4:       omp.inner.for.body:
6138 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
6139 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
6140 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6141 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !30
6142 // CHECK4-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !30
6143 // CHECK4-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
6144 // CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
6145 // CHECK4-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
6146 // CHECK4-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !30
6147 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6148 // CHECK4:       omp.body.continue:
6149 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6150 // CHECK4:       omp.inner.for.inc:
6151 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
6152 // CHECK4-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
6153 // CHECK4-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
6154 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
6155 // CHECK4:       omp.inner.for.end:
6156 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6157 // CHECK4:       omp.loop.exit:
6158 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
6159 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6160 // CHECK4-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
6161 // CHECK4-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6162 // CHECK4:       .omp.final.then:
6163 // CHECK4-NEXT:    store i32 10, i32* [[I]], align 4
6164 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6165 // CHECK4:       .omp.final.done:
6166 // CHECK4-NEXT:    ret void
6167 //
6168 //
6169 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
6170 // CHECK4-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR2]] {
6171 // CHECK4-NEXT:  entry:
6172 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
6173 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
6174 // CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
6175 // CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
6176 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
6177 // CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
6178 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
6179 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
6180 // CHECK4-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
6181 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
6182 // CHECK4-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
6183 // CHECK4-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
6184 // CHECK4-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
6185 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
6186 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
6187 // CHECK4-NEXT:    ret void
6188 //
6189 //
6190 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..6
6191 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3]] {
6192 // CHECK4-NEXT:  entry:
6193 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6194 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6195 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
6196 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
6197 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6198 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6199 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6200 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6201 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6202 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6203 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
6204 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6205 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6206 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
6207 // CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
6208 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
6209 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6210 // CHECK4-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
6211 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6212 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6213 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6214 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
6215 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6216 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6217 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
6218 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6219 // CHECK4:       cond.true:
6220 // CHECK4-NEXT:    br label [[COND_END:%.*]]
6221 // CHECK4:       cond.false:
6222 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6223 // CHECK4-NEXT:    br label [[COND_END]]
6224 // CHECK4:       cond.end:
6225 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
6226 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6227 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6228 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
6229 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6230 // CHECK4:       omp.inner.for.cond:
6231 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
6232 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
6233 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
6234 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6235 // CHECK4:       omp.inner.for.body:
6236 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
6237 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
6238 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6239 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !33
6240 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !33
6241 // CHECK4-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
6242 // CHECK4-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !33
6243 // CHECK4-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !33
6244 // CHECK4-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP9]] to i32
6245 // CHECK4-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
6246 // CHECK4-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
6247 // CHECK4-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !33
6248 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6249 // CHECK4:       omp.body.continue:
6250 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6251 // CHECK4:       omp.inner.for.inc:
6252 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
6253 // CHECK4-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1
6254 // CHECK4-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
6255 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
6256 // CHECK4:       omp.inner.for.end:
6257 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6258 // CHECK4:       omp.loop.exit:
6259 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
6260 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6261 // CHECK4-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
6262 // CHECK4-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6263 // CHECK4:       .omp.final.then:
6264 // CHECK4-NEXT:    store i32 10, i32* [[I]], align 4
6265 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6266 // CHECK4:       .omp.final.done:
6267 // CHECK4-NEXT:    ret void
6268 //
6269 //
6270 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
6271 // CHECK4-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR2]] {
6272 // CHECK4-NEXT:  entry:
6273 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
6274 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
6275 // CHECK4-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
6276 // CHECK4-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
6277 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
6278 // CHECK4-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
6279 // CHECK4-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
6280 // CHECK4-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
6281 // CHECK4-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
6282 // CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
6283 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
6284 // CHECK4-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
6285 // CHECK4-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
6286 // CHECK4-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
6287 // CHECK4-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
6288 // CHECK4-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
6289 // CHECK4-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
6290 // CHECK4-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
6291 // CHECK4-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
6292 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
6293 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
6294 // CHECK4-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
6295 // CHECK4-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
6296 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
6297 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
6298 // CHECK4-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
6299 // CHECK4-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
6300 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
6301 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
6302 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
6303 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
6304 // CHECK4-NEXT:    ret void
6305 //
6306 //
6307 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..9
6308 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR3]] {
6309 // CHECK4-NEXT:  entry:
6310 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6311 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6312 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
6313 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
6314 // CHECK4-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
6315 // CHECK4-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
6316 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
6317 // CHECK4-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
6318 // CHECK4-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
6319 // CHECK4-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
6320 // CHECK4-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
6321 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6322 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6323 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6324 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6325 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6326 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6327 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
6328 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6329 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6330 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
6331 // CHECK4-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
6332 // CHECK4-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
6333 // CHECK4-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
6334 // CHECK4-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
6335 // CHECK4-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
6336 // CHECK4-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
6337 // CHECK4-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
6338 // CHECK4-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
6339 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
6340 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
6341 // CHECK4-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
6342 // CHECK4-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
6343 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
6344 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
6345 // CHECK4-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
6346 // CHECK4-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
6347 // CHECK4-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 0
6348 // CHECK4-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
6349 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6350 // CHECK4-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
6351 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6352 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6353 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6354 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
6355 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6356 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6357 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
6358 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6359 // CHECK4:       cond.true:
6360 // CHECK4-NEXT:    br label [[COND_END:%.*]]
6361 // CHECK4:       cond.false:
6362 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6363 // CHECK4-NEXT:    br label [[COND_END]]
6364 // CHECK4:       cond.end:
6365 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
6366 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6367 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6368 // CHECK4-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
6369 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6370 // CHECK4:       omp.inner.for.cond:
6371 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
6372 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
6373 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
6374 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6375 // CHECK4:       omp.inner.for.body:
6376 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
6377 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
6378 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6379 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !36
6380 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !36
6381 // CHECK4-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
6382 // CHECK4-NEXT:    store i32 [[ADD6]], i32* [[A_ADDR]], align 4, !llvm.access.group !36
6383 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
6384 // CHECK4-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !36
6385 // CHECK4-NEXT:    [[CONV:%.*]] = fpext float [[TMP17]] to double
6386 // CHECK4-NEXT:    [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00
6387 // CHECK4-NEXT:    [[CONV8:%.*]] = fptrunc double [[ADD7]] to float
6388 // CHECK4-NEXT:    store float [[CONV8]], float* [[ARRAYIDX]], align 4, !llvm.access.group !36
6389 // CHECK4-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
6390 // CHECK4-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX9]], align 4, !llvm.access.group !36
6391 // CHECK4-NEXT:    [[CONV10:%.*]] = fpext float [[TMP18]] to double
6392 // CHECK4-NEXT:    [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00
6393 // CHECK4-NEXT:    [[CONV12:%.*]] = fptrunc double [[ADD11]] to float
6394 // CHECK4-NEXT:    store float [[CONV12]], float* [[ARRAYIDX9]], align 4, !llvm.access.group !36
6395 // CHECK4-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
6396 // CHECK4-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX13]], i32 0, i32 2
6397 // CHECK4-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX14]], align 8, !llvm.access.group !36
6398 // CHECK4-NEXT:    [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00
6399 // CHECK4-NEXT:    store double [[ADD15]], double* [[ARRAYIDX14]], align 8, !llvm.access.group !36
6400 // CHECK4-NEXT:    [[TMP20:%.*]] = mul nsw i32 1, [[TMP5]]
6401 // CHECK4-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP20]]
6402 // CHECK4-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX16]], i32 3
6403 // CHECK4-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !36
6404 // CHECK4-NEXT:    [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00
6405 // CHECK4-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !36
6406 // CHECK4-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
6407 // CHECK4-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !36
6408 // CHECK4-NEXT:    [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1
6409 // CHECK4-NEXT:    store i64 [[ADD19]], i64* [[X]], align 4, !llvm.access.group !36
6410 // CHECK4-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
6411 // CHECK4-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !36
6412 // CHECK4-NEXT:    [[CONV20:%.*]] = sext i8 [[TMP23]] to i32
6413 // CHECK4-NEXT:    [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1
6414 // CHECK4-NEXT:    [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8
6415 // CHECK4-NEXT:    store i8 [[CONV22]], i8* [[Y]], align 4, !llvm.access.group !36
6416 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6417 // CHECK4:       omp.body.continue:
6418 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6419 // CHECK4:       omp.inner.for.inc:
6420 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
6421 // CHECK4-NEXT:    [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1
6422 // CHECK4-NEXT:    store i32 [[ADD23]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
6423 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
6424 // CHECK4:       omp.inner.for.end:
6425 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6426 // CHECK4:       omp.loop.exit:
6427 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
6428 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6429 // CHECK4-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
6430 // CHECK4-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6431 // CHECK4:       .omp.final.then:
6432 // CHECK4-NEXT:    store i32 10, i32* [[I]], align 4
6433 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6434 // CHECK4:       .omp.final.done:
6435 // CHECK4-NEXT:    ret void
6436 //
6437 //
6438 // CHECK4-LABEL: define {{[^@]+}}@_Z3bari
6439 // CHECK4-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
6440 // CHECK4-NEXT:  entry:
6441 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6442 // CHECK4-NEXT:    [[A:%.*]] = alloca i32, align 4
6443 // CHECK4-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
6444 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6445 // CHECK4-NEXT:    store i32 0, i32* [[A]], align 4
6446 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
6447 // CHECK4-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
6448 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
6449 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
6450 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
6451 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
6452 // CHECK4-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef [[TMP2]])
6453 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
6454 // CHECK4-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
6455 // CHECK4-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
6456 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
6457 // CHECK4-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
6458 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
6459 // CHECK4-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
6460 // CHECK4-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
6461 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
6462 // CHECK4-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
6463 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
6464 // CHECK4-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
6465 // CHECK4-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
6466 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
6467 // CHECK4-NEXT:    ret i32 [[TMP8]]
6468 //
6469 //
6470 // CHECK4-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
6471 // CHECK4-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
6472 // CHECK4-NEXT:  entry:
6473 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
6474 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6475 // CHECK4-NEXT:    [[B:%.*]] = alloca i32, align 4
6476 // CHECK4-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
6477 // CHECK4-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
6478 // CHECK4-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
6479 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
6480 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
6481 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
6482 // CHECK4-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4
6483 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6484 // CHECK4-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
6485 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6486 // CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
6487 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
6488 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
6489 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
6490 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
6491 // CHECK4-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
6492 // CHECK4-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
6493 // CHECK4-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
6494 // CHECK4-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
6495 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
6496 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B]], align 4
6497 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
6498 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
6499 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
6500 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60
6501 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
6502 // CHECK4:       omp_if.then:
6503 // CHECK4-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
6504 // CHECK4-NEXT:    [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]]
6505 // CHECK4-NEXT:    [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2
6506 // CHECK4-NEXT:    [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
6507 // CHECK4-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
6508 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
6509 // CHECK4-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 4
6510 // CHECK4-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
6511 // CHECK4-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
6512 // CHECK4-NEXT:    store double* [[A]], double** [[TMP13]], align 4
6513 // CHECK4-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
6514 // CHECK4-NEXT:    store i64 8, i64* [[TMP14]], align 4
6515 // CHECK4-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
6516 // CHECK4-NEXT:    store i8* null, i8** [[TMP15]], align 4
6517 // CHECK4-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
6518 // CHECK4-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
6519 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[TMP17]], align 4
6520 // CHECK4-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
6521 // CHECK4-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
6522 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[TMP19]], align 4
6523 // CHECK4-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
6524 // CHECK4-NEXT:    store i64 4, i64* [[TMP20]], align 4
6525 // CHECK4-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
6526 // CHECK4-NEXT:    store i8* null, i8** [[TMP21]], align 4
6527 // CHECK4-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
6528 // CHECK4-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
6529 // CHECK4-NEXT:    store i32 2, i32* [[TMP23]], align 4
6530 // CHECK4-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
6531 // CHECK4-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
6532 // CHECK4-NEXT:    store i32 2, i32* [[TMP25]], align 4
6533 // CHECK4-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
6534 // CHECK4-NEXT:    store i64 4, i64* [[TMP26]], align 4
6535 // CHECK4-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
6536 // CHECK4-NEXT:    store i8* null, i8** [[TMP27]], align 4
6537 // CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
6538 // CHECK4-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
6539 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP29]], align 4
6540 // CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
6541 // CHECK4-NEXT:    [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32*
6542 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP31]], align 4
6543 // CHECK4-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
6544 // CHECK4-NEXT:    store i64 4, i64* [[TMP32]], align 4
6545 // CHECK4-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
6546 // CHECK4-NEXT:    store i8* null, i8** [[TMP33]], align 4
6547 // CHECK4-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
6548 // CHECK4-NEXT:    [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
6549 // CHECK4-NEXT:    store i16* [[VLA]], i16** [[TMP35]], align 4
6550 // CHECK4-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
6551 // CHECK4-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
6552 // CHECK4-NEXT:    store i16* [[VLA]], i16** [[TMP37]], align 4
6553 // CHECK4-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
6554 // CHECK4-NEXT:    store i64 [[TMP9]], i64* [[TMP38]], align 4
6555 // CHECK4-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
6556 // CHECK4-NEXT:    store i8* null, i8** [[TMP39]], align 4
6557 // CHECK4-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
6558 // CHECK4-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
6559 // CHECK4-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
6560 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
6561 // CHECK4-NEXT:    [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
6562 // CHECK4-NEXT:    [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
6563 // CHECK4-NEXT:    br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
6564 // CHECK4:       omp_offload.failed:
6565 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]]
6566 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
6567 // CHECK4:       omp_offload.cont:
6568 // CHECK4-NEXT:    br label [[OMP_IF_END:%.*]]
6569 // CHECK4:       omp_if.else:
6570 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]]
6571 // CHECK4-NEXT:    br label [[OMP_IF_END]]
6572 // CHECK4:       omp_if.end:
6573 // CHECK4-NEXT:    [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]]
6574 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]]
6575 // CHECK4-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
6576 // CHECK4-NEXT:    [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
6577 // CHECK4-NEXT:    [[CONV:%.*]] = sext i16 [[TMP46]] to i32
6578 // CHECK4-NEXT:    [[TMP47:%.*]] = load i32, i32* [[B]], align 4
6579 // CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]]
6580 // CHECK4-NEXT:    [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
6581 // CHECK4-NEXT:    call void @llvm.stackrestore(i8* [[TMP48]])
6582 // CHECK4-NEXT:    ret i32 [[ADD3]]
6583 //
6584 //
6585 // CHECK4-LABEL: define {{[^@]+}}@_ZL7fstatici
6586 // CHECK4-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
6587 // CHECK4-NEXT:  entry:
6588 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6589 // CHECK4-NEXT:    [[A:%.*]] = alloca i32, align 4
6590 // CHECK4-NEXT:    [[AA:%.*]] = alloca i16, align 2
6591 // CHECK4-NEXT:    [[AAA:%.*]] = alloca i8, align 1
6592 // CHECK4-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
6593 // CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
6594 // CHECK4-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
6595 // CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
6596 // CHECK4-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
6597 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
6598 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
6599 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
6600 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6601 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6602 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6603 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
6604 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6605 // CHECK4-NEXT:    store i32 0, i32* [[A]], align 4
6606 // CHECK4-NEXT:    store i16 0, i16* [[AA]], align 2
6607 // CHECK4-NEXT:    store i8 0, i8* [[AAA]], align 1
6608 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
6609 // CHECK4-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
6610 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
6611 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
6612 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
6613 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
6614 // CHECK4-NEXT:    [[TMP4:%.*]] = load i16, i16* [[AA]], align 2
6615 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
6616 // CHECK4-NEXT:    store i16 [[TMP4]], i16* [[CONV]], align 2
6617 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[AA_CASTED]], align 4
6618 // CHECK4-NEXT:    [[TMP6:%.*]] = load i8, i8* [[AAA]], align 1
6619 // CHECK4-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
6620 // CHECK4-NEXT:    store i8 [[TMP6]], i8* [[CONV1]], align 1
6621 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
6622 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[N_ADDR]], align 4
6623 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 50
6624 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
6625 // CHECK4:       omp_if.then:
6626 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
6627 // CHECK4-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
6628 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP10]], align 4
6629 // CHECK4-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
6630 // CHECK4-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32*
6631 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP12]], align 4
6632 // CHECK4-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
6633 // CHECK4-NEXT:    store i8* null, i8** [[TMP13]], align 4
6634 // CHECK4-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
6635 // CHECK4-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
6636 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[TMP15]], align 4
6637 // CHECK4-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
6638 // CHECK4-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
6639 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[TMP17]], align 4
6640 // CHECK4-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
6641 // CHECK4-NEXT:    store i8* null, i8** [[TMP18]], align 4
6642 // CHECK4-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
6643 // CHECK4-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
6644 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[TMP20]], align 4
6645 // CHECK4-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
6646 // CHECK4-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32*
6647 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[TMP22]], align 4
6648 // CHECK4-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
6649 // CHECK4-NEXT:    store i8* null, i8** [[TMP23]], align 4
6650 // CHECK4-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
6651 // CHECK4-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
6652 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[TMP25]], align 4
6653 // CHECK4-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
6654 // CHECK4-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
6655 // CHECK4-NEXT:    store i32 [[TMP7]], i32* [[TMP27]], align 4
6656 // CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
6657 // CHECK4-NEXT:    store i8* null, i8** [[TMP28]], align 4
6658 // CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
6659 // CHECK4-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [10 x i32]**
6660 // CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP30]], align 4
6661 // CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
6662 // CHECK4-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to [10 x i32]**
6663 // CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP32]], align 4
6664 // CHECK4-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
6665 // CHECK4-NEXT:    store i8* null, i8** [[TMP33]], align 4
6666 // CHECK4-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
6667 // CHECK4-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
6668 // CHECK4-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A]], align 4
6669 // CHECK4-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_]], align 4
6670 // CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[N_ADDR]], align 4
6671 // CHECK4-NEXT:    store i32 [[TMP37]], i32* [[DOTCAPTURE_EXPR_2]], align 4
6672 // CHECK4-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6673 // CHECK4-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6674 // CHECK4-NEXT:    [[SUB:%.*]] = sub i32 [[TMP38]], [[TMP39]]
6675 // CHECK4-NEXT:    [[SUB4:%.*]] = sub i32 [[SUB]], 1
6676 // CHECK4-NEXT:    [[ADD:%.*]] = add i32 [[SUB4]], 1
6677 // CHECK4-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
6678 // CHECK4-NEXT:    [[SUB5:%.*]] = sub i32 [[DIV]], 1
6679 // CHECK4-NEXT:    store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_3]], align 4
6680 // CHECK4-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
6681 // CHECK4-NEXT:    [[ADD6:%.*]] = add i32 [[TMP40]], 1
6682 // CHECK4-NEXT:    [[TMP41:%.*]] = zext i32 [[ADD6]] to i64
6683 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]])
6684 // CHECK4-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
6685 // CHECK4-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
6686 // CHECK4-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
6687 // CHECK4:       omp_offload.failed:
6688 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], i32 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
6689 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
6690 // CHECK4:       omp_offload.cont:
6691 // CHECK4-NEXT:    br label [[OMP_IF_END:%.*]]
6692 // CHECK4:       omp_if.else:
6693 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], i32 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
6694 // CHECK4-NEXT:    br label [[OMP_IF_END]]
6695 // CHECK4:       omp_if.end:
6696 // CHECK4-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
6697 // CHECK4-NEXT:    ret i32 [[TMP44]]
6698 //
6699 //
6700 // CHECK4-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
6701 // CHECK4-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
6702 // CHECK4-NEXT:  entry:
6703 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6704 // CHECK4-NEXT:    [[A:%.*]] = alloca i32, align 4
6705 // CHECK4-NEXT:    [[AA:%.*]] = alloca i16, align 2
6706 // CHECK4-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
6707 // CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
6708 // CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
6709 // CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
6710 // CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
6711 // CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
6712 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6713 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6714 // CHECK4-NEXT:    store i32 0, i32* [[A]], align 4
6715 // CHECK4-NEXT:    store i16 0, i16* [[AA]], align 2
6716 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
6717 // CHECK4-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
6718 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
6719 // CHECK4-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
6720 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
6721 // CHECK4-NEXT:    store i16 [[TMP2]], i16* [[CONV]], align 2
6722 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
6723 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
6724 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
6725 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
6726 // CHECK4:       omp_if.then:
6727 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
6728 // CHECK4-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
6729 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
6730 // CHECK4-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
6731 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
6732 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
6733 // CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
6734 // CHECK4-NEXT:    store i8* null, i8** [[TMP9]], align 4
6735 // CHECK4-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
6736 // CHECK4-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
6737 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[TMP11]], align 4
6738 // CHECK4-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
6739 // CHECK4-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
6740 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
6741 // CHECK4-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
6742 // CHECK4-NEXT:    store i8* null, i8** [[TMP14]], align 4
6743 // CHECK4-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
6744 // CHECK4-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
6745 // CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
6746 // CHECK4-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
6747 // CHECK4-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
6748 // CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
6749 // CHECK4-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
6750 // CHECK4-NEXT:    store i8* null, i8** [[TMP19]], align 4
6751 // CHECK4-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
6752 // CHECK4-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
6753 // CHECK4-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
6754 // CHECK4-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
6755 // CHECK4-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
6756 // CHECK4-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
6757 // CHECK4:       omp_offload.failed:
6758 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
6759 // CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
6760 // CHECK4:       omp_offload.cont:
6761 // CHECK4-NEXT:    br label [[OMP_IF_END:%.*]]
6762 // CHECK4:       omp_if.else:
6763 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
6764 // CHECK4-NEXT:    br label [[OMP_IF_END]]
6765 // CHECK4:       omp_if.end:
6766 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
6767 // CHECK4-NEXT:    ret i32 [[TMP24]]
6768 //
6769 //
6770 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
6771 // CHECK4-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
6772 // CHECK4-NEXT:  entry:
6773 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
6774 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
6775 // CHECK4-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
6776 // CHECK4-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
6777 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
6778 // CHECK4-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
6779 // CHECK4-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
6780 // CHECK4-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
6781 // CHECK4-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
6782 // CHECK4-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
6783 // CHECK4-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
6784 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
6785 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
6786 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
6787 // CHECK4-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
6788 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
6789 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
6790 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
6791 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
6792 // CHECK4-NEXT:    ret void
6793 //
6794 //
6795 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11
6796 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
6797 // CHECK4-NEXT:  entry:
6798 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6799 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6800 // CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
6801 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
6802 // CHECK4-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
6803 // CHECK4-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
6804 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
6805 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6806 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6807 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6808 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6809 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6810 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6811 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
6812 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6813 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6814 // CHECK4-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
6815 // CHECK4-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
6816 // CHECK4-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
6817 // CHECK4-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
6818 // CHECK4-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
6819 // CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
6820 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
6821 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
6822 // CHECK4-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
6823 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6824 // CHECK4-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
6825 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6826 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6827 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6828 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
6829 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6830 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6831 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
6832 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6833 // CHECK4:       cond.true:
6834 // CHECK4-NEXT:    br label [[COND_END:%.*]]
6835 // CHECK4:       cond.false:
6836 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6837 // CHECK4-NEXT:    br label [[COND_END]]
6838 // CHECK4:       cond.end:
6839 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
6840 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6841 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6842 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
6843 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6844 // CHECK4:       omp.inner.for.cond:
6845 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
6846 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !39
6847 // CHECK4-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
6848 // CHECK4-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6849 // CHECK4:       omp.inner.for.body:
6850 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
6851 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
6852 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6853 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !39
6854 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !39
6855 // CHECK4-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
6856 // CHECK4-NEXT:    [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00
6857 // CHECK4-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
6858 // CHECK4-NEXT:    store double [[ADD4]], double* [[A]], align 4, !llvm.access.group !39
6859 // CHECK4-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
6860 // CHECK4-NEXT:    [[TMP13:%.*]] = load double, double* [[A5]], align 4, !llvm.access.group !39
6861 // CHECK4-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
6862 // CHECK4-NEXT:    store double [[INC]], double* [[A5]], align 4, !llvm.access.group !39
6863 // CHECK4-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
6864 // CHECK4-NEXT:    [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
6865 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
6866 // CHECK4-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
6867 // CHECK4-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !39
6868 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6869 // CHECK4:       omp.body.continue:
6870 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6871 // CHECK4:       omp.inner.for.inc:
6872 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
6873 // CHECK4-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1
6874 // CHECK4-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
6875 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
6876 // CHECK4:       omp.inner.for.end:
6877 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6878 // CHECK4:       omp.loop.exit:
6879 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
6880 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6881 // CHECK4-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
6882 // CHECK4-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6883 // CHECK4:       .omp.final.then:
6884 // CHECK4-NEXT:    store i32 10, i32* [[I]], align 4
6885 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6886 // CHECK4:       .omp.final.done:
6887 // CHECK4-NEXT:    ret void
6888 //
6889 //
6890 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
6891 // CHECK4-SAME: (i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
6892 // CHECK4-NEXT:  entry:
6893 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
6894 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6895 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
6896 // CHECK4-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
6897 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
6898 // CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
6899 // CHECK4-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
6900 // CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
6901 // CHECK4-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
6902 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
6903 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6904 // CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
6905 // CHECK4-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
6906 // CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
6907 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
6908 // CHECK4-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
6909 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
6910 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
6911 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
6912 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
6913 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
6914 // CHECK4-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
6915 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
6916 // CHECK4-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV]], align 2
6917 // CHECK4-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
6918 // CHECK4-NEXT:    store i16 [[TMP5]], i16* [[CONV2]], align 2
6919 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AA_CASTED]], align 4
6920 // CHECK4-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV1]], align 1
6921 // CHECK4-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
6922 // CHECK4-NEXT:    store i8 [[TMP7]], i8* [[CONV3]], align 1
6923 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
6924 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]])
6925 // CHECK4-NEXT:    ret void
6926 //
6927 //
6928 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..13
6929 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
6930 // CHECK4-NEXT:  entry:
6931 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
6932 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
6933 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
6934 // CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
6935 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
6936 // CHECK4-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
6937 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
6938 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6939 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6940 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6941 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6942 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
6943 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
6944 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6945 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6946 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6947 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6948 // CHECK4-NEXT:    [[I6:%.*]] = alloca i32, align 4
6949 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
6950 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
6951 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
6952 // CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
6953 // CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
6954 // CHECK4-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
6955 // CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
6956 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
6957 // CHECK4-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
6958 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
6959 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
6960 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
6961 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
6962 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
6963 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6964 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6965 // CHECK4-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
6966 // CHECK4-NEXT:    [[SUB4:%.*]] = sub i32 [[SUB]], 1
6967 // CHECK4-NEXT:    [[ADD:%.*]] = add i32 [[SUB4]], 1
6968 // CHECK4-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
6969 // CHECK4-NEXT:    [[SUB5:%.*]] = sub i32 [[DIV]], 1
6970 // CHECK4-NEXT:    store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_3]], align 4
6971 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6972 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
6973 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6974 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6975 // CHECK4-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
6976 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6977 // CHECK4:       omp.precond.then:
6978 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6979 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
6980 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
6981 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6982 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6983 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
6984 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
6985 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6986 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6987 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
6988 // CHECK4-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
6989 // CHECK4-NEXT:    br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6990 // CHECK4:       cond.true:
6991 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
6992 // CHECK4-NEXT:    br label [[COND_END:%.*]]
6993 // CHECK4:       cond.false:
6994 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6995 // CHECK4-NEXT:    br label [[COND_END]]
6996 // CHECK4:       cond.end:
6997 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
6998 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6999 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7000 // CHECK4-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
7001 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7002 // CHECK4:       omp.inner.for.cond:
7003 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
7004 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !42
7005 // CHECK4-NEXT:    [[ADD8:%.*]] = add i32 [[TMP17]], 1
7006 // CHECK4-NEXT:    [[CMP9:%.*]] = icmp ult i32 [[TMP16]], [[ADD8]]
7007 // CHECK4-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7008 // CHECK4:       omp.inner.for.body:
7009 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !42
7010 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
7011 // CHECK4-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
7012 // CHECK4-NEXT:    [[ADD10:%.*]] = add i32 [[TMP18]], [[MUL]]
7013 // CHECK4-NEXT:    store i32 [[ADD10]], i32* [[I6]], align 4, !llvm.access.group !42
7014 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !42
7015 // CHECK4-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP20]], 1
7016 // CHECK4-NEXT:    store i32 [[ADD11]], i32* [[A_ADDR]], align 4, !llvm.access.group !42
7017 // CHECK4-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !42
7018 // CHECK4-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP21]] to i32
7019 // CHECK4-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
7020 // CHECK4-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
7021 // CHECK4-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !42
7022 // CHECK4-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV1]], align 1, !llvm.access.group !42
7023 // CHECK4-NEXT:    [[CONV15:%.*]] = sext i8 [[TMP22]] to i32
7024 // CHECK4-NEXT:    [[ADD16:%.*]] = add nsw i32 [[CONV15]], 1
7025 // CHECK4-NEXT:    [[CONV17:%.*]] = trunc i32 [[ADD16]] to i8
7026 // CHECK4-NEXT:    store i8 [[CONV17]], i8* [[CONV1]], align 1, !llvm.access.group !42
7027 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
7028 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !42
7029 // CHECK4-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP23]], 1
7030 // CHECK4-NEXT:    store i32 [[ADD18]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !42
7031 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7032 // CHECK4:       omp.body.continue:
7033 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7034 // CHECK4:       omp.inner.for.inc:
7035 // CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
7036 // CHECK4-NEXT:    [[ADD19:%.*]] = add i32 [[TMP24]], 1
7037 // CHECK4-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
7038 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
7039 // CHECK4:       omp.inner.for.end:
7040 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7041 // CHECK4:       omp.loop.exit:
7042 // CHECK4-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7043 // CHECK4-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
7044 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
7045 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7046 // CHECK4-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
7047 // CHECK4-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7048 // CHECK4:       .omp.final.then:
7049 // CHECK4-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7050 // CHECK4-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7051 // CHECK4-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7052 // CHECK4-NEXT:    [[SUB20:%.*]] = sub i32 [[TMP30]], [[TMP31]]
7053 // CHECK4-NEXT:    [[SUB21:%.*]] = sub i32 [[SUB20]], 1
7054 // CHECK4-NEXT:    [[ADD22:%.*]] = add i32 [[SUB21]], 1
7055 // CHECK4-NEXT:    [[DIV23:%.*]] = udiv i32 [[ADD22]], 1
7056 // CHECK4-NEXT:    [[MUL24:%.*]] = mul i32 [[DIV23]], 1
7057 // CHECK4-NEXT:    [[ADD25:%.*]] = add i32 [[TMP29]], [[MUL24]]
7058 // CHECK4-NEXT:    store i32 [[ADD25]], i32* [[I6]], align 4
7059 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7060 // CHECK4:       .omp.final.done:
7061 // CHECK4-NEXT:    br label [[OMP_PRECOND_END]]
7062 // CHECK4:       omp.precond.end:
7063 // CHECK4-NEXT:    ret void
7064 //
7065 //
7066 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
7067 // CHECK4-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
7068 // CHECK4-NEXT:  entry:
7069 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
7070 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
7071 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
7072 // CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
7073 // CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
7074 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
7075 // CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
7076 // CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
7077 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
7078 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
7079 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
7080 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
7081 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
7082 // CHECK4-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
7083 // CHECK4-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
7084 // CHECK4-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
7085 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
7086 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
7087 // CHECK4-NEXT:    ret void
7088 //
7089 //
7090 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..16
7091 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
7092 // CHECK4-NEXT:  entry:
7093 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
7094 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
7095 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
7096 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
7097 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
7098 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7099 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7100 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7101 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7102 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7103 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7104 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
7105 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
7106 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
7107 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
7108 // CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
7109 // CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
7110 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
7111 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
7112 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7113 // CHECK4-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
7114 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7115 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7116 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
7117 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
7118 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7119 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7120 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
7121 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7122 // CHECK4:       cond.true:
7123 // CHECK4-NEXT:    br label [[COND_END:%.*]]
7124 // CHECK4:       cond.false:
7125 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7126 // CHECK4-NEXT:    br label [[COND_END]]
7127 // CHECK4:       cond.end:
7128 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
7129 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7130 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7131 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
7132 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7133 // CHECK4:       omp.inner.for.cond:
7134 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
7135 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !45
7136 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
7137 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7138 // CHECK4:       omp.inner.for.body:
7139 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
7140 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
7141 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7142 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !45
7143 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !45
7144 // CHECK4-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
7145 // CHECK4-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !45
7146 // CHECK4-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !45
7147 // CHECK4-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
7148 // CHECK4-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
7149 // CHECK4-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
7150 // CHECK4-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !45
7151 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
7152 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !45
7153 // CHECK4-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
7154 // CHECK4-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !45
7155 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7156 // CHECK4:       omp.body.continue:
7157 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7158 // CHECK4:       omp.inner.for.inc:
7159 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
7160 // CHECK4-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP12]], 1
7161 // CHECK4-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
7162 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
7163 // CHECK4:       omp.inner.for.end:
7164 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7165 // CHECK4:       omp.loop.exit:
7166 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
7167 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7168 // CHECK4-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
7169 // CHECK4-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7170 // CHECK4:       .omp.final.then:
7171 // CHECK4-NEXT:    store i32 10, i32* [[I]], align 4
7172 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7173 // CHECK4:       .omp.final.done:
7174 // CHECK4-NEXT:    ret void
7175 //
7176 //
7177 // CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
7178 // CHECK4-SAME: () #[[ATTR5]] {
7179 // CHECK4-NEXT:  entry:
7180 // CHECK4-NEXT:    call void @__tgt_register_requires(i64 1)
7181 // CHECK4-NEXT:    ret void
7182 //
7183 //
7184 // CHECK5-LABEL: define {{[^@]+}}@_Z3fooi
7185 // CHECK5-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
7186 // CHECK5-NEXT:  entry:
7187 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
7188 // CHECK5-NEXT:    [[A:%.*]] = alloca i32, align 4
7189 // CHECK5-NEXT:    [[AA:%.*]] = alloca i16, align 2
7190 // CHECK5-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
7191 // CHECK5-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
7192 // CHECK5-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
7193 // CHECK5-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
7194 // CHECK5-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
7195 // CHECK5-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
7196 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7197 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
7198 // CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
7199 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
7200 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR__CASTED4:%.*]] = alloca i64, align 8
7201 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
7202 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
7203 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
7204 // CHECK5-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
7205 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
7206 // CHECK5-NEXT:    [[AA_CASTED7:%.*]] = alloca i64, align 8
7207 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS9:%.*]] = alloca [1 x i8*], align 8
7208 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS10:%.*]] = alloca [1 x i8*], align 8
7209 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS11:%.*]] = alloca [1 x i8*], align 8
7210 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7211 // CHECK5-NEXT:    [[A_CASTED12:%.*]] = alloca i64, align 8
7212 // CHECK5-NEXT:    [[AA_CASTED14:%.*]] = alloca i64, align 8
7213 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS16:%.*]] = alloca [2 x i8*], align 8
7214 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS17:%.*]] = alloca [2 x i8*], align 8
7215 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS18:%.*]] = alloca [2 x i8*], align 8
7216 // CHECK5-NEXT:    [[_TMP19:%.*]] = alloca i32, align 4
7217 // CHECK5-NEXT:    [[A_CASTED22:%.*]] = alloca i64, align 8
7218 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS26:%.*]] = alloca [9 x i8*], align 8
7219 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS27:%.*]] = alloca [9 x i8*], align 8
7220 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS28:%.*]] = alloca [9 x i8*], align 8
7221 // CHECK5-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8
7222 // CHECK5-NEXT:    [[_TMP29:%.*]] = alloca i32, align 4
7223 // CHECK5-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
7224 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
7225 // CHECK5-NEXT:    store i32 0, i32* [[A]], align 4
7226 // CHECK5-NEXT:    store i16 0, i16* [[AA]], align 2
7227 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
7228 // CHECK5-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
7229 // CHECK5-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
7230 // CHECK5-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
7231 // CHECK5-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
7232 // CHECK5-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
7233 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
7234 // CHECK5-NEXT:    [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
7235 // CHECK5-NEXT:    [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
7236 // CHECK5-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
7237 // CHECK5-NEXT:    store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
7238 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
7239 // CHECK5-NEXT:    store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_]], align 4
7240 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
7241 // CHECK5-NEXT:    store i32 [[TMP8]], i32* [[DOTCAPTURE_EXPR_2]], align 4
7242 // CHECK5-NEXT:    [[TMP9:%.*]] = load i16, i16* [[AA]], align 2
7243 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
7244 // CHECK5-NEXT:    store i16 [[TMP9]], i16* [[CONV]], align 2
7245 // CHECK5-NEXT:    [[TMP10:%.*]] = load i64, i64* [[AA_CASTED]], align 8
7246 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7247 // CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
7248 // CHECK5-NEXT:    store i32 [[TMP11]], i32* [[CONV3]], align 4
7249 // CHECK5-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
7250 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7251 // CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED4]] to i32*
7252 // CHECK5-NEXT:    store i32 [[TMP13]], i32* [[CONV5]], align 4
7253 // CHECK5-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED4]], align 8
7254 // CHECK5-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7255 // CHECK5-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64*
7256 // CHECK5-NEXT:    store i64 [[TMP10]], i64* [[TMP16]], align 8
7257 // CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7258 // CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
7259 // CHECK5-NEXT:    store i64 [[TMP10]], i64* [[TMP18]], align 8
7260 // CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
7261 // CHECK5-NEXT:    store i8* null, i8** [[TMP19]], align 8
7262 // CHECK5-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
7263 // CHECK5-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64*
7264 // CHECK5-NEXT:    store i64 [[TMP12]], i64* [[TMP21]], align 8
7265 // CHECK5-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
7266 // CHECK5-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64*
7267 // CHECK5-NEXT:    store i64 [[TMP12]], i64* [[TMP23]], align 8
7268 // CHECK5-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
7269 // CHECK5-NEXT:    store i8* null, i8** [[TMP24]], align 8
7270 // CHECK5-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
7271 // CHECK5-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64*
7272 // CHECK5-NEXT:    store i64 [[TMP14]], i64* [[TMP26]], align 8
7273 // CHECK5-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
7274 // CHECK5-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64*
7275 // CHECK5-NEXT:    store i64 [[TMP14]], i64* [[TMP28]], align 8
7276 // CHECK5-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
7277 // CHECK5-NEXT:    store i8* null, i8** [[TMP29]], align 8
7278 // CHECK5-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7279 // CHECK5-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7280 // CHECK5-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
7281 // CHECK5-NEXT:    [[TMP33:%.*]] = load i16, i16* [[AA]], align 2
7282 // CHECK5-NEXT:    store i16 [[TMP33]], i16* [[TMP32]], align 4
7283 // CHECK5-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
7284 // CHECK5-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7285 // CHECK5-NEXT:    store i32 [[TMP35]], i32* [[TMP34]], align 4
7286 // CHECK5-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
7287 // CHECK5-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
7288 // CHECK5-NEXT:    store i32 [[TMP37]], i32* [[TMP36]], align 4
7289 // CHECK5-NEXT:    [[TMP38:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 120, i64 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
7290 // CHECK5-NEXT:    [[TMP39:%.*]] = bitcast i8* [[TMP38]] to %struct.kmp_task_t_with_privates*
7291 // CHECK5-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 0
7292 // CHECK5-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP40]], i32 0, i32 0
7293 // CHECK5-NEXT:    [[TMP42:%.*]] = load i8*, i8** [[TMP41]], align 8
7294 // CHECK5-NEXT:    [[TMP43:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
7295 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP42]], i8* align 4 [[TMP43]], i64 12, i1 false)
7296 // CHECK5-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 1
7297 // CHECK5-NEXT:    [[TMP45:%.*]] = bitcast i8* [[TMP42]] to %struct.anon*
7298 // CHECK5-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 0
7299 // CHECK5-NEXT:    [[TMP47:%.*]] = bitcast [3 x i8*]* [[TMP46]] to i8*
7300 // CHECK5-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP30]] to i8*
7301 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP47]], i8* align 8 [[TMP48]], i64 24, i1 false)
7302 // CHECK5-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 1
7303 // CHECK5-NEXT:    [[TMP50:%.*]] = bitcast [3 x i8*]* [[TMP49]] to i8*
7304 // CHECK5-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP31]] to i8*
7305 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP50]], i8* align 8 [[TMP51]], i64 24, i1 false)
7306 // CHECK5-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 2
7307 // CHECK5-NEXT:    [[TMP53:%.*]] = bitcast [3 x i64]* [[TMP52]] to i8*
7308 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP53]], i8* align 8 bitcast ([3 x i64]* @.offload_sizes to i8*), i64 24, i1 false)
7309 // CHECK5-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 3
7310 // CHECK5-NEXT:    [[TMP55:%.*]] = load i16, i16* [[AA]], align 2
7311 // CHECK5-NEXT:    store i16 [[TMP55]], i16* [[TMP54]], align 8
7312 // CHECK5-NEXT:    [[TMP56:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP38]])
7313 // CHECK5-NEXT:    [[TMP57:%.*]] = load i32, i32* [[A]], align 4
7314 // CHECK5-NEXT:    [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
7315 // CHECK5-NEXT:    store i32 [[TMP57]], i32* [[CONV6]], align 4
7316 // CHECK5-NEXT:    [[TMP58:%.*]] = load i64, i64* [[A_CASTED]], align 8
7317 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l102(i64 [[TMP58]]) #[[ATTR4:[0-9]+]]
7318 // CHECK5-NEXT:    [[TMP59:%.*]] = load i16, i16* [[AA]], align 2
7319 // CHECK5-NEXT:    [[CONV8:%.*]] = bitcast i64* [[AA_CASTED7]] to i16*
7320 // CHECK5-NEXT:    store i16 [[TMP59]], i16* [[CONV8]], align 2
7321 // CHECK5-NEXT:    [[TMP60:%.*]] = load i64, i64* [[AA_CASTED7]], align 8
7322 // CHECK5-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS9]], i32 0, i32 0
7323 // CHECK5-NEXT:    [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64*
7324 // CHECK5-NEXT:    store i64 [[TMP60]], i64* [[TMP62]], align 8
7325 // CHECK5-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS10]], i32 0, i32 0
7326 // CHECK5-NEXT:    [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64*
7327 // CHECK5-NEXT:    store i64 [[TMP60]], i64* [[TMP64]], align 8
7328 // CHECK5-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS11]], i64 0, i64 0
7329 // CHECK5-NEXT:    store i8* null, i8** [[TMP65]], align 8
7330 // CHECK5-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS9]], i32 0, i32 0
7331 // CHECK5-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS10]], i32 0, i32 0
7332 // CHECK5-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
7333 // CHECK5-NEXT:    [[TMP68:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111.region_id, i32 1, i8** [[TMP66]], i8** [[TMP67]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
7334 // CHECK5-NEXT:    [[TMP69:%.*]] = icmp ne i32 [[TMP68]], 0
7335 // CHECK5-NEXT:    br i1 [[TMP69]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
7336 // CHECK5:       omp_offload.failed:
7337 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111(i64 [[TMP60]]) #[[ATTR4]]
7338 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT]]
7339 // CHECK5:       omp_offload.cont:
7340 // CHECK5-NEXT:    [[TMP70:%.*]] = load i32, i32* [[A]], align 4
7341 // CHECK5-NEXT:    [[CONV13:%.*]] = bitcast i64* [[A_CASTED12]] to i32*
7342 // CHECK5-NEXT:    store i32 [[TMP70]], i32* [[CONV13]], align 4
7343 // CHECK5-NEXT:    [[TMP71:%.*]] = load i64, i64* [[A_CASTED12]], align 8
7344 // CHECK5-NEXT:    [[TMP72:%.*]] = load i16, i16* [[AA]], align 2
7345 // CHECK5-NEXT:    [[CONV15:%.*]] = bitcast i64* [[AA_CASTED14]] to i16*
7346 // CHECK5-NEXT:    store i16 [[TMP72]], i16* [[CONV15]], align 2
7347 // CHECK5-NEXT:    [[TMP73:%.*]] = load i64, i64* [[AA_CASTED14]], align 8
7348 // CHECK5-NEXT:    [[TMP74:%.*]] = load i32, i32* [[N_ADDR]], align 4
7349 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP74]], 10
7350 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
7351 // CHECK5:       omp_if.then:
7352 // CHECK5-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
7353 // CHECK5-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
7354 // CHECK5-NEXT:    store i64 [[TMP71]], i64* [[TMP76]], align 8
7355 // CHECK5-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
7356 // CHECK5-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64*
7357 // CHECK5-NEXT:    store i64 [[TMP71]], i64* [[TMP78]], align 8
7358 // CHECK5-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0
7359 // CHECK5-NEXT:    store i8* null, i8** [[TMP79]], align 8
7360 // CHECK5-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1
7361 // CHECK5-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
7362 // CHECK5-NEXT:    store i64 [[TMP73]], i64* [[TMP81]], align 8
7363 // CHECK5-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1
7364 // CHECK5-NEXT:    [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64*
7365 // CHECK5-NEXT:    store i64 [[TMP73]], i64* [[TMP83]], align 8
7366 // CHECK5-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1
7367 // CHECK5-NEXT:    store i8* null, i8** [[TMP84]], align 8
7368 // CHECK5-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
7369 // CHECK5-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
7370 // CHECK5-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
7371 // CHECK5-NEXT:    [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118.region_id, i32 2, i8** [[TMP85]], i8** [[TMP86]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.7, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
7372 // CHECK5-NEXT:    [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0
7373 // CHECK5-NEXT:    br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
7374 // CHECK5:       omp_offload.failed20:
7375 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i64 [[TMP71]], i64 [[TMP73]]) #[[ATTR4]]
7376 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT21]]
7377 // CHECK5:       omp_offload.cont21:
7378 // CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
7379 // CHECK5:       omp_if.else:
7380 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i64 [[TMP71]], i64 [[TMP73]]) #[[ATTR4]]
7381 // CHECK5-NEXT:    br label [[OMP_IF_END]]
7382 // CHECK5:       omp_if.end:
7383 // CHECK5-NEXT:    [[TMP89:%.*]] = load i32, i32* [[A]], align 4
7384 // CHECK5-NEXT:    [[CONV23:%.*]] = bitcast i64* [[A_CASTED22]] to i32*
7385 // CHECK5-NEXT:    store i32 [[TMP89]], i32* [[CONV23]], align 4
7386 // CHECK5-NEXT:    [[TMP90:%.*]] = load i64, i64* [[A_CASTED22]], align 8
7387 // CHECK5-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N_ADDR]], align 4
7388 // CHECK5-NEXT:    [[CMP24:%.*]] = icmp sgt i32 [[TMP91]], 20
7389 // CHECK5-NEXT:    br i1 [[CMP24]], label [[OMP_IF_THEN25:%.*]], label [[OMP_IF_ELSE32:%.*]]
7390 // CHECK5:       omp_if.then25:
7391 // CHECK5-NEXT:    [[TMP92:%.*]] = mul nuw i64 [[TMP2]], 4
7392 // CHECK5-NEXT:    [[TMP93:%.*]] = mul nuw i64 5, [[TMP5]]
7393 // CHECK5-NEXT:    [[TMP94:%.*]] = mul nuw i64 [[TMP93]], 8
7394 // CHECK5-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0
7395 // CHECK5-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i64*
7396 // CHECK5-NEXT:    store i64 [[TMP90]], i64* [[TMP96]], align 8
7397 // CHECK5-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0
7398 // CHECK5-NEXT:    [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64*
7399 // CHECK5-NEXT:    store i64 [[TMP90]], i64* [[TMP98]], align 8
7400 // CHECK5-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
7401 // CHECK5-NEXT:    store i64 4, i64* [[TMP99]], align 8
7402 // CHECK5-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 0
7403 // CHECK5-NEXT:    store i8* null, i8** [[TMP100]], align 8
7404 // CHECK5-NEXT:    [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 1
7405 // CHECK5-NEXT:    [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]**
7406 // CHECK5-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 8
7407 // CHECK5-NEXT:    [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 1
7408 // CHECK5-NEXT:    [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]**
7409 // CHECK5-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8
7410 // CHECK5-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
7411 // CHECK5-NEXT:    store i64 40, i64* [[TMP105]], align 8
7412 // CHECK5-NEXT:    [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 1
7413 // CHECK5-NEXT:    store i8* null, i8** [[TMP106]], align 8
7414 // CHECK5-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 2
7415 // CHECK5-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64*
7416 // CHECK5-NEXT:    store i64 [[TMP2]], i64* [[TMP108]], align 8
7417 // CHECK5-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 2
7418 // CHECK5-NEXT:    [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i64*
7419 // CHECK5-NEXT:    store i64 [[TMP2]], i64* [[TMP110]], align 8
7420 // CHECK5-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
7421 // CHECK5-NEXT:    store i64 8, i64* [[TMP111]], align 8
7422 // CHECK5-NEXT:    [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 2
7423 // CHECK5-NEXT:    store i8* null, i8** [[TMP112]], align 8
7424 // CHECK5-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 3
7425 // CHECK5-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float**
7426 // CHECK5-NEXT:    store float* [[VLA]], float** [[TMP114]], align 8
7427 // CHECK5-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 3
7428 // CHECK5-NEXT:    [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float**
7429 // CHECK5-NEXT:    store float* [[VLA]], float** [[TMP116]], align 8
7430 // CHECK5-NEXT:    [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
7431 // CHECK5-NEXT:    store i64 [[TMP92]], i64* [[TMP117]], align 8
7432 // CHECK5-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 3
7433 // CHECK5-NEXT:    store i8* null, i8** [[TMP118]], align 8
7434 // CHECK5-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 4
7435 // CHECK5-NEXT:    [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]**
7436 // CHECK5-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 8
7437 // CHECK5-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 4
7438 // CHECK5-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]**
7439 // CHECK5-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 8
7440 // CHECK5-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
7441 // CHECK5-NEXT:    store i64 400, i64* [[TMP123]], align 8
7442 // CHECK5-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 4
7443 // CHECK5-NEXT:    store i8* null, i8** [[TMP124]], align 8
7444 // CHECK5-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 5
7445 // CHECK5-NEXT:    [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64*
7446 // CHECK5-NEXT:    store i64 5, i64* [[TMP126]], align 8
7447 // CHECK5-NEXT:    [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 5
7448 // CHECK5-NEXT:    [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64*
7449 // CHECK5-NEXT:    store i64 5, i64* [[TMP128]], align 8
7450 // CHECK5-NEXT:    [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
7451 // CHECK5-NEXT:    store i64 8, i64* [[TMP129]], align 8
7452 // CHECK5-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 5
7453 // CHECK5-NEXT:    store i8* null, i8** [[TMP130]], align 8
7454 // CHECK5-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 6
7455 // CHECK5-NEXT:    [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64*
7456 // CHECK5-NEXT:    store i64 [[TMP5]], i64* [[TMP132]], align 8
7457 // CHECK5-NEXT:    [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 6
7458 // CHECK5-NEXT:    [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i64*
7459 // CHECK5-NEXT:    store i64 [[TMP5]], i64* [[TMP134]], align 8
7460 // CHECK5-NEXT:    [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
7461 // CHECK5-NEXT:    store i64 8, i64* [[TMP135]], align 8
7462 // CHECK5-NEXT:    [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 6
7463 // CHECK5-NEXT:    store i8* null, i8** [[TMP136]], align 8
7464 // CHECK5-NEXT:    [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 7
7465 // CHECK5-NEXT:    [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double**
7466 // CHECK5-NEXT:    store double* [[VLA1]], double** [[TMP138]], align 8
7467 // CHECK5-NEXT:    [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 7
7468 // CHECK5-NEXT:    [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double**
7469 // CHECK5-NEXT:    store double* [[VLA1]], double** [[TMP140]], align 8
7470 // CHECK5-NEXT:    [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
7471 // CHECK5-NEXT:    store i64 [[TMP94]], i64* [[TMP141]], align 8
7472 // CHECK5-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 7
7473 // CHECK5-NEXT:    store i8* null, i8** [[TMP142]], align 8
7474 // CHECK5-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 8
7475 // CHECK5-NEXT:    [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT**
7476 // CHECK5-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 8
7477 // CHECK5-NEXT:    [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 8
7478 // CHECK5-NEXT:    [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT**
7479 // CHECK5-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 8
7480 // CHECK5-NEXT:    [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
7481 // CHECK5-NEXT:    store i64 16, i64* [[TMP147]], align 8
7482 // CHECK5-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 8
7483 // CHECK5-NEXT:    store i8* null, i8** [[TMP148]], align 8
7484 // CHECK5-NEXT:    [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0
7485 // CHECK5-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0
7486 // CHECK5-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
7487 // CHECK5-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
7488 // CHECK5-NEXT:    [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
7489 // CHECK5-NEXT:    [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0
7490 // CHECK5-NEXT:    br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
7491 // CHECK5:       omp_offload.failed30:
7492 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
7493 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
7494 // CHECK5:       omp_offload.cont31:
7495 // CHECK5-NEXT:    br label [[OMP_IF_END33:%.*]]
7496 // CHECK5:       omp_if.else32:
7497 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
7498 // CHECK5-NEXT:    br label [[OMP_IF_END33]]
7499 // CHECK5:       omp_if.end33:
7500 // CHECK5-NEXT:    [[TMP154:%.*]] = load i32, i32* [[A]], align 4
7501 // CHECK5-NEXT:    [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
7502 // CHECK5-NEXT:    call void @llvm.stackrestore(i8* [[TMP155]])
7503 // CHECK5-NEXT:    ret i32 [[TMP154]]
7504 //
7505 //
7506 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
7507 // CHECK5-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2:[0-9]+]] {
7508 // CHECK5-NEXT:  entry:
7509 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
7510 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
7511 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8
7512 // CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
7513 // CHECK5-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
7514 // CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
7515 // CHECK5-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
7516 // CHECK5-NEXT:    store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8
7517 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
7518 // CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
7519 // CHECK5-NEXT:    [[CONV4:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32*
7520 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV3]], align 4
7521 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV4]], align 4
7522 // CHECK5-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
7523 // CHECK5-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
7524 // CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
7525 // CHECK5-NEXT:    store i16 [[TMP3]], i16* [[CONV5]], align 2
7526 // CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
7527 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP4]])
7528 // CHECK5-NEXT:    ret void
7529 //
7530 //
7531 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
7532 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3:[0-9]+]] {
7533 // CHECK5-NEXT:  entry:
7534 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7535 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7536 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
7537 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7538 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7539 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7540 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7541 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7542 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7543 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
7544 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7545 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7546 // CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
7547 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
7548 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7549 // CHECK5-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
7550 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7551 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7552 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7553 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
7554 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7555 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7556 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
7557 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7558 // CHECK5:       cond.true:
7559 // CHECK5-NEXT:    br label [[COND_END:%.*]]
7560 // CHECK5:       cond.false:
7561 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7562 // CHECK5-NEXT:    br label [[COND_END]]
7563 // CHECK5:       cond.end:
7564 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
7565 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7566 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7567 // CHECK5-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
7568 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7569 // CHECK5:       omp.inner.for.cond:
7570 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
7571 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
7572 // CHECK5-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
7573 // CHECK5-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7574 // CHECK5:       omp.inner.for.body:
7575 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
7576 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
7577 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7578 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10
7579 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7580 // CHECK5:       omp.body.continue:
7581 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7582 // CHECK5:       omp.inner.for.inc:
7583 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
7584 // CHECK5-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
7585 // CHECK5-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
7586 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
7587 // CHECK5:       omp.inner.for.end:
7588 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7589 // CHECK5:       omp.loop.exit:
7590 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
7591 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7592 // CHECK5-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
7593 // CHECK5-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7594 // CHECK5:       .omp.final.then:
7595 // CHECK5-NEXT:    store i32 10, i32* [[I]], align 4
7596 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7597 // CHECK5:       .omp.final.done:
7598 // CHECK5-NEXT:    ret void
7599 //
7600 //
7601 // CHECK5-LABEL: define {{[^@]+}}@.omp_task_privates_map.
7602 // CHECK5-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], i16** noalias noundef [[TMP1:%.*]], [3 x i8*]** noalias noundef [[TMP2:%.*]], [3 x i8*]** noalias noundef [[TMP3:%.*]], [3 x i64]** noalias noundef [[TMP4:%.*]]) #[[ATTR5:[0-9]+]] {
7603 // CHECK5-NEXT:  entry:
7604 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
7605 // CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca i16**, align 8
7606 // CHECK5-NEXT:    [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 8
7607 // CHECK5-NEXT:    [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 8
7608 // CHECK5-NEXT:    [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 8
7609 // CHECK5-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
7610 // CHECK5-NEXT:    store i16** [[TMP1]], i16*** [[DOTADDR1]], align 8
7611 // CHECK5-NEXT:    store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 8
7612 // CHECK5-NEXT:    store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 8
7613 // CHECK5-NEXT:    store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 8
7614 // CHECK5-NEXT:    [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
7615 // CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0
7616 // CHECK5-NEXT:    [[TMP7:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 8
7617 // CHECK5-NEXT:    store [3 x i8*]* [[TMP6]], [3 x i8*]** [[TMP7]], align 8
7618 // CHECK5-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1
7619 // CHECK5-NEXT:    [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 8
7620 // CHECK5-NEXT:    store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 8
7621 // CHECK5-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2
7622 // CHECK5-NEXT:    [[TMP11:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 8
7623 // CHECK5-NEXT:    store [3 x i64]* [[TMP10]], [3 x i64]** [[TMP11]], align 8
7624 // CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3
7625 // CHECK5-NEXT:    [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 8
7626 // CHECK5-NEXT:    store i16* [[TMP12]], i16** [[TMP13]], align 8
7627 // CHECK5-NEXT:    ret void
7628 //
7629 //
7630 // CHECK5-LABEL: define {{[^@]+}}@.omp_task_entry.
7631 // CHECK5-SAME: (i32 noundef signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR6:[0-9]+]] {
7632 // CHECK5-NEXT:  entry:
7633 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
7634 // CHECK5-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
7635 // CHECK5-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
7636 // CHECK5-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
7637 // CHECK5-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
7638 // CHECK5-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
7639 // CHECK5-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 8
7640 // CHECK5-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 8
7641 // CHECK5-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 8
7642 // CHECK5-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 8
7643 // CHECK5-NEXT:    [[AA_CASTED_I:%.*]] = alloca i64, align 8
7644 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR__CASTED_I:%.*]] = alloca i64, align 8
7645 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR__CASTED5_I:%.*]] = alloca i64, align 8
7646 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
7647 // CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
7648 // CHECK5-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
7649 // CHECK5-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
7650 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
7651 // CHECK5-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
7652 // CHECK5-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
7653 // CHECK5-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
7654 // CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
7655 // CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
7656 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
7657 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
7658 // CHECK5-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
7659 // CHECK5-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
7660 // CHECK5-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
7661 // CHECK5-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
7662 // CHECK5-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
7663 // CHECK5-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]])
7664 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26
7665 // CHECK5-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !26
7666 // CHECK5-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26
7667 // CHECK5-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26
7668 // CHECK5-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !26
7669 // CHECK5-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !26
7670 // CHECK5-NEXT:    [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !26
7671 // CHECK5-NEXT:    [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26
7672 // CHECK5-NEXT:    [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26
7673 // CHECK5-NEXT:    [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)*
7674 // CHECK5-NEXT:    call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
7675 // CHECK5-NEXT:    [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !26
7676 // CHECK5-NEXT:    [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !26
7677 // CHECK5-NEXT:    [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !26
7678 // CHECK5-NEXT:    [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !26
7679 // CHECK5-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i64 0, i64 0
7680 // CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i64 0, i64 0
7681 // CHECK5-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i64 0, i64 0
7682 // CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
7683 // CHECK5-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
7684 // CHECK5-NEXT:    [[TMP25:%.*]] = load i32, i32* [[TMP23]], align 4
7685 // CHECK5-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) #[[ATTR4]]
7686 // CHECK5-NEXT:    [[TMP26:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 [[TMP25]], i32 1, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
7687 // CHECK5-NEXT:    [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
7688 // CHECK5-NEXT:    br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
7689 // CHECK5:       omp_offload.failed.i:
7690 // CHECK5-NEXT:    [[TMP28:%.*]] = load i16, i16* [[TMP16]], align 2
7691 // CHECK5-NEXT:    [[CONV_I:%.*]] = bitcast i64* [[AA_CASTED_I]] to i16*
7692 // CHECK5-NEXT:    store i16 [[TMP28]], i16* [[CONV_I]], align 2, !noalias !26
7693 // CHECK5-NEXT:    [[TMP29:%.*]] = load i64, i64* [[AA_CASTED_I]], align 8, !noalias !26
7694 // CHECK5-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP23]], align 4
7695 // CHECK5-NEXT:    [[CONV4_I:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED_I]] to i32*
7696 // CHECK5-NEXT:    store i32 [[TMP30]], i32* [[CONV4_I]], align 4, !noalias !26
7697 // CHECK5-NEXT:    [[TMP31:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED_I]], align 8, !noalias !26
7698 // CHECK5-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP24]], align 4
7699 // CHECK5-NEXT:    [[CONV6_I:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED5_I]] to i32*
7700 // CHECK5-NEXT:    store i32 [[TMP32]], i32* [[CONV6_I]], align 4, !noalias !26
7701 // CHECK5-NEXT:    [[TMP33:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED5_I]], align 8, !noalias !26
7702 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97(i64 [[TMP29]], i64 [[TMP31]], i64 [[TMP33]]) #[[ATTR4]]
7703 // CHECK5-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
7704 // CHECK5:       .omp_outlined..1.exit:
7705 // CHECK5-NEXT:    ret i32 0
7706 //
7707 //
7708 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l102
7709 // CHECK5-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
7710 // CHECK5-NEXT:  entry:
7711 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
7712 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
7713 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
7714 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
7715 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
7716 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
7717 // CHECK5-NEXT:    store i32 [[TMP0]], i32* [[CONV1]], align 4
7718 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
7719 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]])
7720 // CHECK5-NEXT:    ret void
7721 //
7722 //
7723 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..2
7724 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
7725 // CHECK5-NEXT:  entry:
7726 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7727 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7728 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
7729 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7730 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7731 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7732 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7733 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7734 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7735 // CHECK5-NEXT:    [[A1:%.*]] = alloca i32, align 4
7736 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7737 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7738 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
7739 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
7740 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7741 // CHECK5-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
7742 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7743 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7744 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7745 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
7746 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7747 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7748 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
7749 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7750 // CHECK5:       cond.true:
7751 // CHECK5-NEXT:    br label [[COND_END:%.*]]
7752 // CHECK5:       cond.false:
7753 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7754 // CHECK5-NEXT:    br label [[COND_END]]
7755 // CHECK5:       cond.end:
7756 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
7757 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7758 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7759 // CHECK5-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
7760 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7761 // CHECK5:       omp.inner.for.cond:
7762 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7763 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7764 // CHECK5-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
7765 // CHECK5-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7766 // CHECK5:       omp.inner.for.body:
7767 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7768 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
7769 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7770 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[A1]], align 4, !nontemporal !27
7771 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A1]], align 4, !nontemporal !27
7772 // CHECK5-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
7773 // CHECK5-NEXT:    store i32 [[ADD3]], i32* [[A1]], align 4, !nontemporal !27
7774 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7775 // CHECK5:       omp.body.continue:
7776 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7777 // CHECK5:       omp.inner.for.inc:
7778 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
7779 // CHECK5-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1
7780 // CHECK5-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
7781 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
7782 // CHECK5:       omp.inner.for.end:
7783 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7784 // CHECK5:       omp.loop.exit:
7785 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
7786 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7787 // CHECK5-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
7788 // CHECK5-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7789 // CHECK5:       .omp.final.then:
7790 // CHECK5-NEXT:    store i32 10, i32* [[CONV]], align 4
7791 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7792 // CHECK5:       .omp.final.done:
7793 // CHECK5-NEXT:    ret void
7794 //
7795 //
7796 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
7797 // CHECK5-SAME: (i64 noundef [[AA:%.*]]) #[[ATTR2]] {
7798 // CHECK5-NEXT:  entry:
7799 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
7800 // CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
7801 // CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
7802 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
7803 // CHECK5-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
7804 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
7805 // CHECK5-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
7806 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
7807 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]])
7808 // CHECK5-NEXT:    ret void
7809 //
7810 //
7811 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..3
7812 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3]] {
7813 // CHECK5-NEXT:  entry:
7814 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7815 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7816 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
7817 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7818 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7819 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7820 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7821 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7822 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7823 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
7824 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7825 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7826 // CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
7827 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
7828 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7829 // CHECK5-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
7830 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7831 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7832 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7833 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
7834 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7835 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7836 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
7837 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7838 // CHECK5:       cond.true:
7839 // CHECK5-NEXT:    br label [[COND_END:%.*]]
7840 // CHECK5:       cond.false:
7841 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7842 // CHECK5-NEXT:    br label [[COND_END]]
7843 // CHECK5:       cond.end:
7844 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
7845 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7846 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7847 // CHECK5-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
7848 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7849 // CHECK5:       omp.inner.for.cond:
7850 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
7851 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !30
7852 // CHECK5-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
7853 // CHECK5-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7854 // CHECK5:       omp.inner.for.body:
7855 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
7856 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
7857 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7858 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !30
7859 // CHECK5-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !30
7860 // CHECK5-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
7861 // CHECK5-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
7862 // CHECK5-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
7863 // CHECK5-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !30
7864 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7865 // CHECK5:       omp.body.continue:
7866 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7867 // CHECK5:       omp.inner.for.inc:
7868 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
7869 // CHECK5-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
7870 // CHECK5-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
7871 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
7872 // CHECK5:       omp.inner.for.end:
7873 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7874 // CHECK5:       omp.loop.exit:
7875 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
7876 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7877 // CHECK5-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
7878 // CHECK5-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7879 // CHECK5:       .omp.final.then:
7880 // CHECK5-NEXT:    store i32 10, i32* [[I]], align 4
7881 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7882 // CHECK5:       .omp.final.done:
7883 // CHECK5-NEXT:    ret void
7884 //
7885 //
7886 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
7887 // CHECK5-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR2]] {
7888 // CHECK5-NEXT:  entry:
7889 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
7890 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
7891 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
7892 // CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
7893 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
7894 // CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
7895 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
7896 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
7897 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
7898 // CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
7899 // CHECK5-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
7900 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
7901 // CHECK5-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
7902 // CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
7903 // CHECK5-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
7904 // CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
7905 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
7906 // CHECK5-NEXT:    ret void
7907 //
7908 //
7909 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..6
7910 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3]] {
7911 // CHECK5-NEXT:  entry:
7912 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7913 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7914 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
7915 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
7916 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7917 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7918 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7919 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7920 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7921 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7922 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
7923 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7924 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7925 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
7926 // CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
7927 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
7928 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
7929 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7930 // CHECK5-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
7931 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7932 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7933 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7934 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
7935 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7936 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7937 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
7938 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7939 // CHECK5:       cond.true:
7940 // CHECK5-NEXT:    br label [[COND_END:%.*]]
7941 // CHECK5:       cond.false:
7942 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7943 // CHECK5-NEXT:    br label [[COND_END]]
7944 // CHECK5:       cond.end:
7945 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
7946 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7947 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7948 // CHECK5-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
7949 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7950 // CHECK5:       omp.inner.for.cond:
7951 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
7952 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
7953 // CHECK5-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
7954 // CHECK5-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7955 // CHECK5:       omp.inner.for.body:
7956 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
7957 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
7958 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7959 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !33
7960 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !33
7961 // CHECK5-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
7962 // CHECK5-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !33
7963 // CHECK5-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !33
7964 // CHECK5-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
7965 // CHECK5-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
7966 // CHECK5-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
7967 // CHECK5-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !33
7968 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7969 // CHECK5:       omp.body.continue:
7970 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7971 // CHECK5:       omp.inner.for.inc:
7972 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
7973 // CHECK5-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
7974 // CHECK5-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
7975 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
7976 // CHECK5:       omp.inner.for.end:
7977 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7978 // CHECK5:       omp.loop.exit:
7979 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
7980 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7981 // CHECK5-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
7982 // CHECK5-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7983 // CHECK5:       .omp.final.then:
7984 // CHECK5-NEXT:    store i32 10, i32* [[I]], align 4
7985 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7986 // CHECK5:       .omp.final.done:
7987 // CHECK5-NEXT:    ret void
7988 //
7989 //
7990 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
7991 // CHECK5-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR2]] {
7992 // CHECK5-NEXT:  entry:
7993 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
7994 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
7995 // CHECK5-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
7996 // CHECK5-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
7997 // CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
7998 // CHECK5-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
7999 // CHECK5-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
8000 // CHECK5-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
8001 // CHECK5-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
8002 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
8003 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
8004 // CHECK5-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
8005 // CHECK5-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
8006 // CHECK5-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
8007 // CHECK5-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
8008 // CHECK5-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
8009 // CHECK5-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
8010 // CHECK5-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
8011 // CHECK5-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
8012 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
8013 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
8014 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
8015 // CHECK5-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
8016 // CHECK5-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
8017 // CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
8018 // CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
8019 // CHECK5-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
8020 // CHECK5-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
8021 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
8022 // CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
8023 // CHECK5-NEXT:    store i32 [[TMP8]], i32* [[CONV5]], align 4
8024 // CHECK5-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
8025 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
8026 // CHECK5-NEXT:    ret void
8027 //
8028 //
8029 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..9
8030 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR3]] {
8031 // CHECK5-NEXT:  entry:
8032 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8033 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8034 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
8035 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
8036 // CHECK5-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
8037 // CHECK5-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
8038 // CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
8039 // CHECK5-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
8040 // CHECK5-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
8041 // CHECK5-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
8042 // CHECK5-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
8043 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8044 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8045 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8046 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8047 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8048 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8049 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
8050 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8051 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8052 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
8053 // CHECK5-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
8054 // CHECK5-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
8055 // CHECK5-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
8056 // CHECK5-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
8057 // CHECK5-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
8058 // CHECK5-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
8059 // CHECK5-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
8060 // CHECK5-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
8061 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
8062 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
8063 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
8064 // CHECK5-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
8065 // CHECK5-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
8066 // CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
8067 // CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
8068 // CHECK5-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
8069 // CHECK5-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
8070 // CHECK5-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 0
8071 // CHECK5-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
8072 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8073 // CHECK5-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
8074 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8075 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8076 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8077 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
8078 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8079 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8080 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
8081 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8082 // CHECK5:       cond.true:
8083 // CHECK5-NEXT:    br label [[COND_END:%.*]]
8084 // CHECK5:       cond.false:
8085 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8086 // CHECK5-NEXT:    br label [[COND_END]]
8087 // CHECK5:       cond.end:
8088 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
8089 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
8090 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8091 // CHECK5-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
8092 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8093 // CHECK5:       omp.inner.for.cond:
8094 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
8095 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
8096 // CHECK5-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
8097 // CHECK5-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8098 // CHECK5:       omp.inner.for.body:
8099 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
8100 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
8101 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8102 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !36
8103 // CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !36
8104 // CHECK5-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
8105 // CHECK5-NEXT:    store i32 [[ADD6]], i32* [[CONV]], align 4, !llvm.access.group !36
8106 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
8107 // CHECK5-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !36
8108 // CHECK5-NEXT:    [[CONV7:%.*]] = fpext float [[TMP17]] to double
8109 // CHECK5-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
8110 // CHECK5-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
8111 // CHECK5-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !36
8112 // CHECK5-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
8113 // CHECK5-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
8114 // CHECK5-NEXT:    [[CONV11:%.*]] = fpext float [[TMP18]] to double
8115 // CHECK5-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
8116 // CHECK5-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
8117 // CHECK5-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
8118 // CHECK5-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
8119 // CHECK5-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i64 0, i64 2
8120 // CHECK5-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
8121 // CHECK5-NEXT:    [[ADD16:%.*]] = fadd double [[TMP19]], 1.000000e+00
8122 // CHECK5-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
8123 // CHECK5-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP5]]
8124 // CHECK5-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP20]]
8125 // CHECK5-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i64 3
8126 // CHECK5-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
8127 // CHECK5-NEXT:    [[ADD19:%.*]] = fadd double [[TMP21]], 1.000000e+00
8128 // CHECK5-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
8129 // CHECK5-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
8130 // CHECK5-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !36
8131 // CHECK5-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP22]], 1
8132 // CHECK5-NEXT:    store i64 [[ADD20]], i64* [[X]], align 8, !llvm.access.group !36
8133 // CHECK5-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
8134 // CHECK5-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !36
8135 // CHECK5-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP23]] to i32
8136 // CHECK5-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
8137 // CHECK5-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
8138 // CHECK5-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 8, !llvm.access.group !36
8139 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8140 // CHECK5:       omp.body.continue:
8141 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8142 // CHECK5:       omp.inner.for.inc:
8143 // CHECK5-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
8144 // CHECK5-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP24]], 1
8145 // CHECK5-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
8146 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
8147 // CHECK5:       omp.inner.for.end:
8148 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8149 // CHECK5:       omp.loop.exit:
8150 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
8151 // CHECK5-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8152 // CHECK5-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
8153 // CHECK5-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8154 // CHECK5:       .omp.final.then:
8155 // CHECK5-NEXT:    store i32 10, i32* [[I]], align 4
8156 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8157 // CHECK5:       .omp.final.done:
8158 // CHECK5-NEXT:    ret void
8159 //
8160 //
8161 // CHECK5-LABEL: define {{[^@]+}}@_Z3bari
8162 // CHECK5-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
8163 // CHECK5-NEXT:  entry:
8164 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
8165 // CHECK5-NEXT:    [[A:%.*]] = alloca i32, align 4
8166 // CHECK5-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
8167 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
8168 // CHECK5-NEXT:    store i32 0, i32* [[A]], align 4
8169 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
8170 // CHECK5-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
8171 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
8172 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
8173 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
8174 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
8175 // CHECK5-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef signext [[TMP2]])
8176 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
8177 // CHECK5-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
8178 // CHECK5-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
8179 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
8180 // CHECK5-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
8181 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
8182 // CHECK5-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
8183 // CHECK5-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
8184 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
8185 // CHECK5-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
8186 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
8187 // CHECK5-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
8188 // CHECK5-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
8189 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
8190 // CHECK5-NEXT:    ret i32 [[TMP8]]
8191 //
8192 //
8193 // CHECK5-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
8194 // CHECK5-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
8195 // CHECK5-NEXT:  entry:
8196 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
8197 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
8198 // CHECK5-NEXT:    [[B:%.*]] = alloca i32, align 4
8199 // CHECK5-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
8200 // CHECK5-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
8201 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
8202 // CHECK5-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
8203 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
8204 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8
8205 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8
8206 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8
8207 // CHECK5-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 8
8208 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8209 // CHECK5-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
8210 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
8211 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
8212 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
8213 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
8214 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
8215 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
8216 // CHECK5-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
8217 // CHECK5-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
8218 // CHECK5-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
8219 // CHECK5-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
8220 // CHECK5-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
8221 // CHECK5-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
8222 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
8223 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
8224 // CHECK5-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
8225 // CHECK5-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
8226 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B]], align 4
8227 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
8228 // CHECK5-NEXT:    store i32 [[TMP6]], i32* [[CONV]], align 4
8229 // CHECK5-NEXT:    [[TMP7:%.*]] = load i64, i64* [[B_CASTED]], align 8
8230 // CHECK5-NEXT:    [[TMP8:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
8231 // CHECK5-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
8232 // CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
8233 // CHECK5-NEXT:    [[FROMBOOL3:%.*]] = zext i1 [[TOBOOL]] to i8
8234 // CHECK5-NEXT:    store i8 [[FROMBOOL3]], i8* [[CONV2]], align 1
8235 // CHECK5-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
8236 // CHECK5-NEXT:    [[TMP10:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
8237 // CHECK5-NEXT:    [[TOBOOL4:%.*]] = trunc i8 [[TMP10]] to i1
8238 // CHECK5-NEXT:    br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
8239 // CHECK5:       omp_if.then:
8240 // CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
8241 // CHECK5-NEXT:    [[TMP11:%.*]] = mul nuw i64 2, [[TMP2]]
8242 // CHECK5-NEXT:    [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
8243 // CHECK5-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
8244 // CHECK5-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.S1**
8245 // CHECK5-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP14]], align 8
8246 // CHECK5-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
8247 // CHECK5-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
8248 // CHECK5-NEXT:    store double* [[A]], double** [[TMP16]], align 8
8249 // CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
8250 // CHECK5-NEXT:    store i64 8, i64* [[TMP17]], align 8
8251 // CHECK5-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
8252 // CHECK5-NEXT:    store i8* null, i8** [[TMP18]], align 8
8253 // CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
8254 // CHECK5-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
8255 // CHECK5-NEXT:    store i64 [[TMP7]], i64* [[TMP20]], align 8
8256 // CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
8257 // CHECK5-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
8258 // CHECK5-NEXT:    store i64 [[TMP7]], i64* [[TMP22]], align 8
8259 // CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
8260 // CHECK5-NEXT:    store i64 4, i64* [[TMP23]], align 8
8261 // CHECK5-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
8262 // CHECK5-NEXT:    store i8* null, i8** [[TMP24]], align 8
8263 // CHECK5-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
8264 // CHECK5-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64*
8265 // CHECK5-NEXT:    store i64 2, i64* [[TMP26]], align 8
8266 // CHECK5-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
8267 // CHECK5-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64*
8268 // CHECK5-NEXT:    store i64 2, i64* [[TMP28]], align 8
8269 // CHECK5-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
8270 // CHECK5-NEXT:    store i64 8, i64* [[TMP29]], align 8
8271 // CHECK5-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
8272 // CHECK5-NEXT:    store i8* null, i8** [[TMP30]], align 8
8273 // CHECK5-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
8274 // CHECK5-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
8275 // CHECK5-NEXT:    store i64 [[TMP2]], i64* [[TMP32]], align 8
8276 // CHECK5-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
8277 // CHECK5-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64*
8278 // CHECK5-NEXT:    store i64 [[TMP2]], i64* [[TMP34]], align 8
8279 // CHECK5-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
8280 // CHECK5-NEXT:    store i64 8, i64* [[TMP35]], align 8
8281 // CHECK5-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
8282 // CHECK5-NEXT:    store i8* null, i8** [[TMP36]], align 8
8283 // CHECK5-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
8284 // CHECK5-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16**
8285 // CHECK5-NEXT:    store i16* [[VLA]], i16** [[TMP38]], align 8
8286 // CHECK5-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
8287 // CHECK5-NEXT:    [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16**
8288 // CHECK5-NEXT:    store i16* [[VLA]], i16** [[TMP40]], align 8
8289 // CHECK5-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
8290 // CHECK5-NEXT:    store i64 [[TMP12]], i64* [[TMP41]], align 8
8291 // CHECK5-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
8292 // CHECK5-NEXT:    store i8* null, i8** [[TMP42]], align 8
8293 // CHECK5-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
8294 // CHECK5-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i64*
8295 // CHECK5-NEXT:    store i64 [[TMP9]], i64* [[TMP44]], align 8
8296 // CHECK5-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
8297 // CHECK5-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64*
8298 // CHECK5-NEXT:    store i64 [[TMP9]], i64* [[TMP46]], align 8
8299 // CHECK5-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
8300 // CHECK5-NEXT:    store i64 1, i64* [[TMP47]], align 8
8301 // CHECK5-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
8302 // CHECK5-NEXT:    store i8* null, i8** [[TMP48]], align 8
8303 // CHECK5-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
8304 // CHECK5-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
8305 // CHECK5-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
8306 // CHECK5-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
8307 // CHECK5-NEXT:    [[TMP52:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
8308 // CHECK5-NEXT:    [[TMP53:%.*]] = icmp ne i32 [[TMP52]], 0
8309 // CHECK5-NEXT:    br i1 [[TMP53]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
8310 // CHECK5:       omp_offload.failed:
8311 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]]
8312 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT]]
8313 // CHECK5:       omp_offload.cont:
8314 // CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
8315 // CHECK5:       omp_if.else:
8316 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]]
8317 // CHECK5-NEXT:    br label [[OMP_IF_END]]
8318 // CHECK5:       omp_if.end:
8319 // CHECK5-NEXT:    [[TMP54:%.*]] = mul nsw i64 1, [[TMP2]]
8320 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP54]]
8321 // CHECK5-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
8322 // CHECK5-NEXT:    [[TMP55:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2
8323 // CHECK5-NEXT:    [[CONV6:%.*]] = sext i16 [[TMP55]] to i32
8324 // CHECK5-NEXT:    [[TMP56:%.*]] = load i32, i32* [[B]], align 4
8325 // CHECK5-NEXT:    [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP56]]
8326 // CHECK5-NEXT:    [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
8327 // CHECK5-NEXT:    call void @llvm.stackrestore(i8* [[TMP57]])
8328 // CHECK5-NEXT:    ret i32 [[ADD7]]
8329 //
8330 //
8331 // CHECK5-LABEL: define {{[^@]+}}@_ZL7fstatici
8332 // CHECK5-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
8333 // CHECK5-NEXT:  entry:
8334 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
8335 // CHECK5-NEXT:    [[A:%.*]] = alloca i32, align 4
8336 // CHECK5-NEXT:    [[AA:%.*]] = alloca i16, align 2
8337 // CHECK5-NEXT:    [[AAA:%.*]] = alloca i8, align 1
8338 // CHECK5-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
8339 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
8340 // CHECK5-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
8341 // CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
8342 // CHECK5-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
8343 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
8344 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
8345 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
8346 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8347 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8348 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
8349 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
8350 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
8351 // CHECK5-NEXT:    store i32 0, i32* [[A]], align 4
8352 // CHECK5-NEXT:    store i16 0, i16* [[AA]], align 2
8353 // CHECK5-NEXT:    store i8 0, i8* [[AAA]], align 1
8354 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
8355 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
8356 // CHECK5-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
8357 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
8358 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
8359 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
8360 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
8361 // CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
8362 // CHECK5-NEXT:    [[TMP4:%.*]] = load i16, i16* [[AA]], align 2
8363 // CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
8364 // CHECK5-NEXT:    store i16 [[TMP4]], i16* [[CONV2]], align 2
8365 // CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[AA_CASTED]], align 8
8366 // CHECK5-NEXT:    [[TMP6:%.*]] = load i8, i8* [[AAA]], align 1
8367 // CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
8368 // CHECK5-NEXT:    store i8 [[TMP6]], i8* [[CONV3]], align 1
8369 // CHECK5-NEXT:    [[TMP7:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
8370 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[N_ADDR]], align 4
8371 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 50
8372 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
8373 // CHECK5:       omp_if.then:
8374 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
8375 // CHECK5-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
8376 // CHECK5-NEXT:    store i64 [[TMP1]], i64* [[TMP10]], align 8
8377 // CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
8378 // CHECK5-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64*
8379 // CHECK5-NEXT:    store i64 [[TMP1]], i64* [[TMP12]], align 8
8380 // CHECK5-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
8381 // CHECK5-NEXT:    store i8* null, i8** [[TMP13]], align 8
8382 // CHECK5-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
8383 // CHECK5-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
8384 // CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP15]], align 8
8385 // CHECK5-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
8386 // CHECK5-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
8387 // CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP17]], align 8
8388 // CHECK5-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
8389 // CHECK5-NEXT:    store i8* null, i8** [[TMP18]], align 8
8390 // CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
8391 // CHECK5-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
8392 // CHECK5-NEXT:    store i64 [[TMP5]], i64* [[TMP20]], align 8
8393 // CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
8394 // CHECK5-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
8395 // CHECK5-NEXT:    store i64 [[TMP5]], i64* [[TMP22]], align 8
8396 // CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
8397 // CHECK5-NEXT:    store i8* null, i8** [[TMP23]], align 8
8398 // CHECK5-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
8399 // CHECK5-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
8400 // CHECK5-NEXT:    store i64 [[TMP7]], i64* [[TMP25]], align 8
8401 // CHECK5-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
8402 // CHECK5-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
8403 // CHECK5-NEXT:    store i64 [[TMP7]], i64* [[TMP27]], align 8
8404 // CHECK5-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
8405 // CHECK5-NEXT:    store i8* null, i8** [[TMP28]], align 8
8406 // CHECK5-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
8407 // CHECK5-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [10 x i32]**
8408 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP30]], align 8
8409 // CHECK5-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
8410 // CHECK5-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to [10 x i32]**
8411 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP32]], align 8
8412 // CHECK5-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
8413 // CHECK5-NEXT:    store i8* null, i8** [[TMP33]], align 8
8414 // CHECK5-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
8415 // CHECK5-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
8416 // CHECK5-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A]], align 4
8417 // CHECK5-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_]], align 4
8418 // CHECK5-NEXT:    [[TMP37:%.*]] = load i32, i32* [[N_ADDR]], align 4
8419 // CHECK5-NEXT:    store i32 [[TMP37]], i32* [[DOTCAPTURE_EXPR_4]], align 4
8420 // CHECK5-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
8421 // CHECK5-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8422 // CHECK5-NEXT:    [[SUB:%.*]] = sub i32 [[TMP38]], [[TMP39]]
8423 // CHECK5-NEXT:    [[SUB6:%.*]] = sub i32 [[SUB]], 1
8424 // CHECK5-NEXT:    [[ADD:%.*]] = add i32 [[SUB6]], 1
8425 // CHECK5-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
8426 // CHECK5-NEXT:    [[SUB7:%.*]] = sub i32 [[DIV]], 1
8427 // CHECK5-NEXT:    store i32 [[SUB7]], i32* [[DOTCAPTURE_EXPR_5]], align 4
8428 // CHECK5-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
8429 // CHECK5-NEXT:    [[ADD8:%.*]] = add i32 [[TMP40]], 1
8430 // CHECK5-NEXT:    [[TMP41:%.*]] = zext i32 [[ADD8]] to i64
8431 // CHECK5-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]])
8432 // CHECK5-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
8433 // CHECK5-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
8434 // CHECK5-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
8435 // CHECK5:       omp_offload.failed:
8436 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], i64 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
8437 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT]]
8438 // CHECK5:       omp_offload.cont:
8439 // CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
8440 // CHECK5:       omp_if.else:
8441 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], i64 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
8442 // CHECK5-NEXT:    br label [[OMP_IF_END]]
8443 // CHECK5:       omp_if.end:
8444 // CHECK5-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
8445 // CHECK5-NEXT:    ret i32 [[TMP44]]
8446 //
8447 //
8448 // CHECK5-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
8449 // CHECK5-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
8450 // CHECK5-NEXT:  entry:
8451 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
8452 // CHECK5-NEXT:    [[A:%.*]] = alloca i32, align 4
8453 // CHECK5-NEXT:    [[AA:%.*]] = alloca i16, align 2
8454 // CHECK5-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
8455 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
8456 // CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
8457 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
8458 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
8459 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
8460 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8461 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
8462 // CHECK5-NEXT:    store i32 0, i32* [[A]], align 4
8463 // CHECK5-NEXT:    store i16 0, i16* [[AA]], align 2
8464 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
8465 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
8466 // CHECK5-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
8467 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
8468 // CHECK5-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
8469 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
8470 // CHECK5-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
8471 // CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
8472 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
8473 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
8474 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
8475 // CHECK5:       omp_if.then:
8476 // CHECK5-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
8477 // CHECK5-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
8478 // CHECK5-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
8479 // CHECK5-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
8480 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
8481 // CHECK5-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
8482 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
8483 // CHECK5-NEXT:    store i8* null, i8** [[TMP9]], align 8
8484 // CHECK5-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
8485 // CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
8486 // CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP11]], align 8
8487 // CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
8488 // CHECK5-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
8489 // CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
8490 // CHECK5-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
8491 // CHECK5-NEXT:    store i8* null, i8** [[TMP14]], align 8
8492 // CHECK5-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
8493 // CHECK5-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
8494 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
8495 // CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
8496 // CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
8497 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
8498 // CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
8499 // CHECK5-NEXT:    store i8* null, i8** [[TMP19]], align 8
8500 // CHECK5-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
8501 // CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
8502 // CHECK5-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
8503 // CHECK5-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
8504 // CHECK5-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
8505 // CHECK5-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
8506 // CHECK5:       omp_offload.failed:
8507 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
8508 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT]]
8509 // CHECK5:       omp_offload.cont:
8510 // CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
8511 // CHECK5:       omp_if.else:
8512 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
8513 // CHECK5-NEXT:    br label [[OMP_IF_END]]
8514 // CHECK5:       omp_if.end:
8515 // CHECK5-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
8516 // CHECK5-NEXT:    ret i32 [[TMP24]]
8517 //
8518 //
8519 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
8520 // CHECK5-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
8521 // CHECK5-NEXT:  entry:
8522 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
8523 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
8524 // CHECK5-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
8525 // CHECK5-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
8526 // CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
8527 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
8528 // CHECK5-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
8529 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
8530 // CHECK5-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
8531 // CHECK5-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
8532 // CHECK5-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
8533 // CHECK5-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
8534 // CHECK5-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
8535 // CHECK5-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
8536 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
8537 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
8538 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
8539 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
8540 // CHECK5-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
8541 // CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
8542 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
8543 // CHECK5-NEXT:    [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i32*
8544 // CHECK5-NEXT:    store i32 [[TMP4]], i32* [[CONV4]], align 4
8545 // CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
8546 // CHECK5-NEXT:    [[TMP6:%.*]] = load i8, i8* [[CONV3]], align 1
8547 // CHECK5-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
8548 // CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
8549 // CHECK5-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
8550 // CHECK5-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV5]], align 1
8551 // CHECK5-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
8552 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]], i64 [[TMP7]])
8553 // CHECK5-NEXT:    ret void
8554 //
8555 //
8556 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..11
8557 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
8558 // CHECK5-NEXT:  entry:
8559 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8560 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8561 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
8562 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
8563 // CHECK5-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
8564 // CHECK5-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
8565 // CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
8566 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
8567 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8568 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8569 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8570 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8571 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8572 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8573 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
8574 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8575 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8576 // CHECK5-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
8577 // CHECK5-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
8578 // CHECK5-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
8579 // CHECK5-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
8580 // CHECK5-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
8581 // CHECK5-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
8582 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
8583 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
8584 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
8585 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
8586 // CHECK5-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
8587 // CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
8588 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8589 // CHECK5-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
8590 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8591 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8592 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8593 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
8594 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8595 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8596 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
8597 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8598 // CHECK5:       cond.true:
8599 // CHECK5-NEXT:    br label [[COND_END:%.*]]
8600 // CHECK5:       cond.false:
8601 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8602 // CHECK5-NEXT:    br label [[COND_END]]
8603 // CHECK5:       cond.end:
8604 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
8605 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
8606 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8607 // CHECK5-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
8608 // CHECK5-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV3]], align 1
8609 // CHECK5-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1
8610 // CHECK5-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
8611 // CHECK5:       omp_if.then:
8612 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8613 // CHECK5:       omp.inner.for.cond:
8614 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
8615 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !39
8616 // CHECK5-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
8617 // CHECK5-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8618 // CHECK5:       omp.inner.for.body:
8619 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
8620 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
8621 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8622 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !39
8623 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !39
8624 // CHECK5-NEXT:    [[CONV5:%.*]] = sitofp i32 [[TMP13]] to double
8625 // CHECK5-NEXT:    [[ADD6:%.*]] = fadd double [[CONV5]], 1.500000e+00
8626 // CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
8627 // CHECK5-NEXT:    store double [[ADD6]], double* [[A]], align 8, !llvm.access.group !39
8628 // CHECK5-NEXT:    [[A7:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
8629 // CHECK5-NEXT:    [[TMP14:%.*]] = load double, double* [[A7]], align 8, !llvm.access.group !39
8630 // CHECK5-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
8631 // CHECK5-NEXT:    store double [[INC]], double* [[A7]], align 8, !llvm.access.group !39
8632 // CHECK5-NEXT:    [[CONV8:%.*]] = fptosi double [[INC]] to i16
8633 // CHECK5-NEXT:    [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]]
8634 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP15]]
8635 // CHECK5-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
8636 // CHECK5-NEXT:    store i16 [[CONV8]], i16* [[ARRAYIDX9]], align 2, !llvm.access.group !39
8637 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8638 // CHECK5:       omp.body.continue:
8639 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8640 // CHECK5:       omp.inner.for.inc:
8641 // CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
8642 // CHECK5-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
8643 // CHECK5-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
8644 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
8645 // CHECK5:       omp.inner.for.end:
8646 // CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
8647 // CHECK5:       omp_if.else:
8648 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND11:%.*]]
8649 // CHECK5:       omp.inner.for.cond11:
8650 // CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8651 // CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8652 // CHECK5-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
8653 // CHECK5-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY13:%.*]], label [[OMP_INNER_FOR_END27:%.*]]
8654 // CHECK5:       omp.inner.for.body13:
8655 // CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8656 // CHECK5-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[TMP19]], 1
8657 // CHECK5-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
8658 // CHECK5-NEXT:    store i32 [[ADD15]], i32* [[I]], align 4
8659 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 4
8660 // CHECK5-NEXT:    [[CONV16:%.*]] = sitofp i32 [[TMP20]] to double
8661 // CHECK5-NEXT:    [[ADD17:%.*]] = fadd double [[CONV16]], 1.500000e+00
8662 // CHECK5-NEXT:    [[A18:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
8663 // CHECK5-NEXT:    store double [[ADD17]], double* [[A18]], align 8
8664 // CHECK5-NEXT:    [[A19:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
8665 // CHECK5-NEXT:    [[TMP21:%.*]] = load double, double* [[A19]], align 8
8666 // CHECK5-NEXT:    [[INC20:%.*]] = fadd double [[TMP21]], 1.000000e+00
8667 // CHECK5-NEXT:    store double [[INC20]], double* [[A19]], align 8
8668 // CHECK5-NEXT:    [[CONV21:%.*]] = fptosi double [[INC20]] to i16
8669 // CHECK5-NEXT:    [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]]
8670 // CHECK5-NEXT:    [[ARRAYIDX22:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP22]]
8671 // CHECK5-NEXT:    [[ARRAYIDX23:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX22]], i64 1
8672 // CHECK5-NEXT:    store i16 [[CONV21]], i16* [[ARRAYIDX23]], align 2
8673 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE24:%.*]]
8674 // CHECK5:       omp.body.continue24:
8675 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC25:%.*]]
8676 // CHECK5:       omp.inner.for.inc25:
8677 // CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8678 // CHECK5-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP23]], 1
8679 // CHECK5-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4
8680 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND11]], !llvm.loop [[LOOP42:![0-9]+]]
8681 // CHECK5:       omp.inner.for.end27:
8682 // CHECK5-NEXT:    br label [[OMP_IF_END]]
8683 // CHECK5:       omp_if.end:
8684 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8685 // CHECK5:       omp.loop.exit:
8686 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
8687 // CHECK5-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8688 // CHECK5-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
8689 // CHECK5-NEXT:    br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8690 // CHECK5:       .omp.final.then:
8691 // CHECK5-NEXT:    store i32 10, i32* [[I]], align 4
8692 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8693 // CHECK5:       .omp.final.done:
8694 // CHECK5-NEXT:    ret void
8695 //
8696 //
8697 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
8698 // CHECK5-SAME: (i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
8699 // CHECK5-NEXT:  entry:
8700 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
8701 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
8702 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
8703 // CHECK5-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
8704 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
8705 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
8706 // CHECK5-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
8707 // CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
8708 // CHECK5-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
8709 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
8710 // CHECK5-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
8711 // CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
8712 // CHECK5-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
8713 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
8714 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
8715 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8716 // CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
8717 // CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
8718 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
8719 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
8720 // CHECK5-NEXT:    [[CONV4:%.*]] = bitcast i64* [[A_CASTED]] to i32*
8721 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[CONV4]], align 4
8722 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
8723 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV1]], align 4
8724 // CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[N_CASTED]] to i32*
8725 // CHECK5-NEXT:    store i32 [[TMP3]], i32* [[CONV5]], align 4
8726 // CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
8727 // CHECK5-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV2]], align 2
8728 // CHECK5-NEXT:    [[CONV6:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
8729 // CHECK5-NEXT:    store i16 [[TMP5]], i16* [[CONV6]], align 2
8730 // CHECK5-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AA_CASTED]], align 8
8731 // CHECK5-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 1
8732 // CHECK5-NEXT:    [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
8733 // CHECK5-NEXT:    store i8 [[TMP7]], i8* [[CONV7]], align 1
8734 // CHECK5-NEXT:    [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
8735 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]])
8736 // CHECK5-NEXT:    ret void
8737 //
8738 //
8739 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..13
8740 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
8741 // CHECK5-NEXT:  entry:
8742 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8743 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8744 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
8745 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
8746 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
8747 // CHECK5-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
8748 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
8749 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8750 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8751 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8752 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
8753 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
8754 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
8755 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8756 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8757 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8758 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8759 // CHECK5-NEXT:    [[I8:%.*]] = alloca i32, align 4
8760 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8761 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8762 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
8763 // CHECK5-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
8764 // CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
8765 // CHECK5-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
8766 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
8767 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
8768 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8769 // CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
8770 // CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
8771 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
8772 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
8773 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
8774 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
8775 // CHECK5-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_4]], align 4
8776 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
8777 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8778 // CHECK5-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
8779 // CHECK5-NEXT:    [[SUB6:%.*]] = sub i32 [[SUB]], 1
8780 // CHECK5-NEXT:    [[ADD:%.*]] = add i32 [[SUB6]], 1
8781 // CHECK5-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
8782 // CHECK5-NEXT:    [[SUB7:%.*]] = sub i32 [[DIV]], 1
8783 // CHECK5-NEXT:    store i32 [[SUB7]], i32* [[DOTCAPTURE_EXPR_5]], align 4
8784 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8785 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
8786 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8787 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
8788 // CHECK5-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
8789 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8790 // CHECK5:       omp.precond.then:
8791 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8792 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
8793 // CHECK5-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
8794 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8795 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8796 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8797 // CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
8798 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8799 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8800 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
8801 // CHECK5-NEXT:    [[CMP9:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
8802 // CHECK5-NEXT:    br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8803 // CHECK5:       cond.true:
8804 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
8805 // CHECK5-NEXT:    br label [[COND_END:%.*]]
8806 // CHECK5:       cond.false:
8807 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8808 // CHECK5-NEXT:    br label [[COND_END]]
8809 // CHECK5:       cond.end:
8810 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
8811 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
8812 // CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8813 // CHECK5-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
8814 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8815 // CHECK5:       omp.inner.for.cond:
8816 // CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
8817 // CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !44
8818 // CHECK5-NEXT:    [[ADD10:%.*]] = add i32 [[TMP17]], 1
8819 // CHECK5-NEXT:    [[CMP11:%.*]] = icmp ult i32 [[TMP16]], [[ADD10]]
8820 // CHECK5-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8821 // CHECK5:       omp.inner.for.body:
8822 // CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !44
8823 // CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
8824 // CHECK5-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
8825 // CHECK5-NEXT:    [[ADD12:%.*]] = add i32 [[TMP18]], [[MUL]]
8826 // CHECK5-NEXT:    store i32 [[ADD12]], i32* [[I8]], align 4, !llvm.access.group !44
8827 // CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !44
8828 // CHECK5-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP20]], 1
8829 // CHECK5-NEXT:    store i32 [[ADD13]], i32* [[CONV]], align 4, !llvm.access.group !44
8830 // CHECK5-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV2]], align 2, !llvm.access.group !44
8831 // CHECK5-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP21]] to i32
8832 // CHECK5-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
8833 // CHECK5-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
8834 // CHECK5-NEXT:    store i16 [[CONV16]], i16* [[CONV2]], align 2, !llvm.access.group !44
8835 // CHECK5-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV3]], align 1, !llvm.access.group !44
8836 // CHECK5-NEXT:    [[CONV17:%.*]] = sext i8 [[TMP22]] to i32
8837 // CHECK5-NEXT:    [[ADD18:%.*]] = add nsw i32 [[CONV17]], 1
8838 // CHECK5-NEXT:    [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
8839 // CHECK5-NEXT:    store i8 [[CONV19]], i8* [[CONV3]], align 1, !llvm.access.group !44
8840 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
8841 // CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
8842 // CHECK5-NEXT:    [[ADD20:%.*]] = add nsw i32 [[TMP23]], 1
8843 // CHECK5-NEXT:    store i32 [[ADD20]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
8844 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8845 // CHECK5:       omp.body.continue:
8846 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8847 // CHECK5:       omp.inner.for.inc:
8848 // CHECK5-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
8849 // CHECK5-NEXT:    [[ADD21:%.*]] = add i32 [[TMP24]], 1
8850 // CHECK5-NEXT:    store i32 [[ADD21]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
8851 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
8852 // CHECK5:       omp.inner.for.end:
8853 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8854 // CHECK5:       omp.loop.exit:
8855 // CHECK5-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8856 // CHECK5-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
8857 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
8858 // CHECK5-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8859 // CHECK5-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
8860 // CHECK5-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8861 // CHECK5:       .omp.final.then:
8862 // CHECK5-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8863 // CHECK5-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
8864 // CHECK5-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8865 // CHECK5-NEXT:    [[SUB22:%.*]] = sub i32 [[TMP30]], [[TMP31]]
8866 // CHECK5-NEXT:    [[SUB23:%.*]] = sub i32 [[SUB22]], 1
8867 // CHECK5-NEXT:    [[ADD24:%.*]] = add i32 [[SUB23]], 1
8868 // CHECK5-NEXT:    [[DIV25:%.*]] = udiv i32 [[ADD24]], 1
8869 // CHECK5-NEXT:    [[MUL26:%.*]] = mul i32 [[DIV25]], 1
8870 // CHECK5-NEXT:    [[ADD27:%.*]] = add i32 [[TMP29]], [[MUL26]]
8871 // CHECK5-NEXT:    store i32 [[ADD27]], i32* [[I8]], align 4
8872 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8873 // CHECK5:       .omp.final.done:
8874 // CHECK5-NEXT:    br label [[OMP_PRECOND_END]]
8875 // CHECK5:       omp.precond.end:
8876 // CHECK5-NEXT:    ret void
8877 //
8878 //
8879 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
8880 // CHECK5-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
8881 // CHECK5-NEXT:  entry:
8882 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
8883 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
8884 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
8885 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
8886 // CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
8887 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
8888 // CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
8889 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
8890 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
8891 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
8892 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
8893 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
8894 // CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
8895 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
8896 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
8897 // CHECK5-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
8898 // CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
8899 // CHECK5-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
8900 // CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
8901 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
8902 // CHECK5-NEXT:    ret void
8903 //
8904 //
8905 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..16
8906 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
8907 // CHECK5-NEXT:  entry:
8908 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8909 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8910 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
8911 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
8912 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
8913 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8914 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8915 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8916 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8917 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8918 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8919 // CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
8920 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8921 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8922 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
8923 // CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
8924 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
8925 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
8926 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
8927 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
8928 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8929 // CHECK5-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
8930 // CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8931 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8932 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8933 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
8934 // CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8935 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8936 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
8937 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8938 // CHECK5:       cond.true:
8939 // CHECK5-NEXT:    br label [[COND_END:%.*]]
8940 // CHECK5:       cond.false:
8941 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8942 // CHECK5-NEXT:    br label [[COND_END]]
8943 // CHECK5:       cond.end:
8944 // CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
8945 // CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
8946 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8947 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
8948 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8949 // CHECK5:       omp.inner.for.cond:
8950 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
8951 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47
8952 // CHECK5-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
8953 // CHECK5-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8954 // CHECK5:       omp.inner.for.body:
8955 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
8956 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
8957 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8958 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !47
8959 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !47
8960 // CHECK5-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
8961 // CHECK5-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !47
8962 // CHECK5-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !47
8963 // CHECK5-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
8964 // CHECK5-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
8965 // CHECK5-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
8966 // CHECK5-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !47
8967 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
8968 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !47
8969 // CHECK5-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
8970 // CHECK5-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !47
8971 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8972 // CHECK5:       omp.body.continue:
8973 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8974 // CHECK5:       omp.inner.for.inc:
8975 // CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
8976 // CHECK5-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP12]], 1
8977 // CHECK5-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
8978 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
8979 // CHECK5:       omp.inner.for.end:
8980 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8981 // CHECK5:       omp.loop.exit:
8982 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
8983 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8984 // CHECK5-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
8985 // CHECK5-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8986 // CHECK5:       .omp.final.then:
8987 // CHECK5-NEXT:    store i32 10, i32* [[I]], align 4
8988 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8989 // CHECK5:       .omp.final.done:
8990 // CHECK5-NEXT:    ret void
8991 //
8992 //
8993 // CHECK5-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
8994 // CHECK5-SAME: () #[[ATTR5]] {
8995 // CHECK5-NEXT:  entry:
8996 // CHECK5-NEXT:    call void @__tgt_register_requires(i64 1)
8997 // CHECK5-NEXT:    ret void
8998 //
8999 //
9000 // CHECK6-LABEL: define {{[^@]+}}@_Z3fooi
9001 // CHECK6-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
9002 // CHECK6-NEXT:  entry:
9003 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
9004 // CHECK6-NEXT:    [[A:%.*]] = alloca i32, align 4
9005 // CHECK6-NEXT:    [[AA:%.*]] = alloca i16, align 2
9006 // CHECK6-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
9007 // CHECK6-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
9008 // CHECK6-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
9009 // CHECK6-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
9010 // CHECK6-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
9011 // CHECK6-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
9012 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9013 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
9014 // CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
9015 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
9016 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR__CASTED4:%.*]] = alloca i64, align 8
9017 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
9018 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
9019 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
9020 // CHECK6-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
9021 // CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
9022 // CHECK6-NEXT:    [[AA_CASTED7:%.*]] = alloca i64, align 8
9023 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS9:%.*]] = alloca [1 x i8*], align 8
9024 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS10:%.*]] = alloca [1 x i8*], align 8
9025 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS11:%.*]] = alloca [1 x i8*], align 8
9026 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9027 // CHECK6-NEXT:    [[A_CASTED12:%.*]] = alloca i64, align 8
9028 // CHECK6-NEXT:    [[AA_CASTED14:%.*]] = alloca i64, align 8
9029 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS16:%.*]] = alloca [2 x i8*], align 8
9030 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS17:%.*]] = alloca [2 x i8*], align 8
9031 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS18:%.*]] = alloca [2 x i8*], align 8
9032 // CHECK6-NEXT:    [[_TMP19:%.*]] = alloca i32, align 4
9033 // CHECK6-NEXT:    [[A_CASTED22:%.*]] = alloca i64, align 8
9034 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS26:%.*]] = alloca [9 x i8*], align 8
9035 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS27:%.*]] = alloca [9 x i8*], align 8
9036 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS28:%.*]] = alloca [9 x i8*], align 8
9037 // CHECK6-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8
9038 // CHECK6-NEXT:    [[_TMP29:%.*]] = alloca i32, align 4
9039 // CHECK6-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
9040 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
9041 // CHECK6-NEXT:    store i32 0, i32* [[A]], align 4
9042 // CHECK6-NEXT:    store i16 0, i16* [[AA]], align 2
9043 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
9044 // CHECK6-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
9045 // CHECK6-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
9046 // CHECK6-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
9047 // CHECK6-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
9048 // CHECK6-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
9049 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
9050 // CHECK6-NEXT:    [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
9051 // CHECK6-NEXT:    [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
9052 // CHECK6-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
9053 // CHECK6-NEXT:    store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
9054 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
9055 // CHECK6-NEXT:    store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_]], align 4
9056 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
9057 // CHECK6-NEXT:    store i32 [[TMP8]], i32* [[DOTCAPTURE_EXPR_2]], align 4
9058 // CHECK6-NEXT:    [[TMP9:%.*]] = load i16, i16* [[AA]], align 2
9059 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
9060 // CHECK6-NEXT:    store i16 [[TMP9]], i16* [[CONV]], align 2
9061 // CHECK6-NEXT:    [[TMP10:%.*]] = load i64, i64* [[AA_CASTED]], align 8
9062 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9063 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
9064 // CHECK6-NEXT:    store i32 [[TMP11]], i32* [[CONV3]], align 4
9065 // CHECK6-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
9066 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9067 // CHECK6-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED4]] to i32*
9068 // CHECK6-NEXT:    store i32 [[TMP13]], i32* [[CONV5]], align 4
9069 // CHECK6-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED4]], align 8
9070 // CHECK6-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
9071 // CHECK6-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64*
9072 // CHECK6-NEXT:    store i64 [[TMP10]], i64* [[TMP16]], align 8
9073 // CHECK6-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
9074 // CHECK6-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
9075 // CHECK6-NEXT:    store i64 [[TMP10]], i64* [[TMP18]], align 8
9076 // CHECK6-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
9077 // CHECK6-NEXT:    store i8* null, i8** [[TMP19]], align 8
9078 // CHECK6-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
9079 // CHECK6-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64*
9080 // CHECK6-NEXT:    store i64 [[TMP12]], i64* [[TMP21]], align 8
9081 // CHECK6-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
9082 // CHECK6-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64*
9083 // CHECK6-NEXT:    store i64 [[TMP12]], i64* [[TMP23]], align 8
9084 // CHECK6-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
9085 // CHECK6-NEXT:    store i8* null, i8** [[TMP24]], align 8
9086 // CHECK6-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
9087 // CHECK6-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64*
9088 // CHECK6-NEXT:    store i64 [[TMP14]], i64* [[TMP26]], align 8
9089 // CHECK6-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
9090 // CHECK6-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64*
9091 // CHECK6-NEXT:    store i64 [[TMP14]], i64* [[TMP28]], align 8
9092 // CHECK6-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
9093 // CHECK6-NEXT:    store i8* null, i8** [[TMP29]], align 8
9094 // CHECK6-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
9095 // CHECK6-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
9096 // CHECK6-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
9097 // CHECK6-NEXT:    [[TMP33:%.*]] = load i16, i16* [[AA]], align 2
9098 // CHECK6-NEXT:    store i16 [[TMP33]], i16* [[TMP32]], align 4
9099 // CHECK6-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
9100 // CHECK6-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9101 // CHECK6-NEXT:    store i32 [[TMP35]], i32* [[TMP34]], align 4
9102 // CHECK6-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
9103 // CHECK6-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9104 // CHECK6-NEXT:    store i32 [[TMP37]], i32* [[TMP36]], align 4
9105 // CHECK6-NEXT:    [[TMP38:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 120, i64 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
9106 // CHECK6-NEXT:    [[TMP39:%.*]] = bitcast i8* [[TMP38]] to %struct.kmp_task_t_with_privates*
9107 // CHECK6-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 0
9108 // CHECK6-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP40]], i32 0, i32 0
9109 // CHECK6-NEXT:    [[TMP42:%.*]] = load i8*, i8** [[TMP41]], align 8
9110 // CHECK6-NEXT:    [[TMP43:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
9111 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP42]], i8* align 4 [[TMP43]], i64 12, i1 false)
9112 // CHECK6-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 1
9113 // CHECK6-NEXT:    [[TMP45:%.*]] = bitcast i8* [[TMP42]] to %struct.anon*
9114 // CHECK6-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 0
9115 // CHECK6-NEXT:    [[TMP47:%.*]] = bitcast [3 x i8*]* [[TMP46]] to i8*
9116 // CHECK6-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP30]] to i8*
9117 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP47]], i8* align 8 [[TMP48]], i64 24, i1 false)
9118 // CHECK6-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 1
9119 // CHECK6-NEXT:    [[TMP50:%.*]] = bitcast [3 x i8*]* [[TMP49]] to i8*
9120 // CHECK6-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP31]] to i8*
9121 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP50]], i8* align 8 [[TMP51]], i64 24, i1 false)
9122 // CHECK6-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 2
9123 // CHECK6-NEXT:    [[TMP53:%.*]] = bitcast [3 x i64]* [[TMP52]] to i8*
9124 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP53]], i8* align 8 bitcast ([3 x i64]* @.offload_sizes to i8*), i64 24, i1 false)
9125 // CHECK6-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 3
9126 // CHECK6-NEXT:    [[TMP55:%.*]] = load i16, i16* [[AA]], align 2
9127 // CHECK6-NEXT:    store i16 [[TMP55]], i16* [[TMP54]], align 8
9128 // CHECK6-NEXT:    [[TMP56:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP38]])
9129 // CHECK6-NEXT:    [[TMP57:%.*]] = load i32, i32* [[A]], align 4
9130 // CHECK6-NEXT:    [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
9131 // CHECK6-NEXT:    store i32 [[TMP57]], i32* [[CONV6]], align 4
9132 // CHECK6-NEXT:    [[TMP58:%.*]] = load i64, i64* [[A_CASTED]], align 8
9133 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l102(i64 [[TMP58]]) #[[ATTR4:[0-9]+]]
9134 // CHECK6-NEXT:    [[TMP59:%.*]] = load i16, i16* [[AA]], align 2
9135 // CHECK6-NEXT:    [[CONV8:%.*]] = bitcast i64* [[AA_CASTED7]] to i16*
9136 // CHECK6-NEXT:    store i16 [[TMP59]], i16* [[CONV8]], align 2
9137 // CHECK6-NEXT:    [[TMP60:%.*]] = load i64, i64* [[AA_CASTED7]], align 8
9138 // CHECK6-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS9]], i32 0, i32 0
9139 // CHECK6-NEXT:    [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64*
9140 // CHECK6-NEXT:    store i64 [[TMP60]], i64* [[TMP62]], align 8
9141 // CHECK6-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS10]], i32 0, i32 0
9142 // CHECK6-NEXT:    [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64*
9143 // CHECK6-NEXT:    store i64 [[TMP60]], i64* [[TMP64]], align 8
9144 // CHECK6-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS11]], i64 0, i64 0
9145 // CHECK6-NEXT:    store i8* null, i8** [[TMP65]], align 8
9146 // CHECK6-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS9]], i32 0, i32 0
9147 // CHECK6-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS10]], i32 0, i32 0
9148 // CHECK6-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
9149 // CHECK6-NEXT:    [[TMP68:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111.region_id, i32 1, i8** [[TMP66]], i8** [[TMP67]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
9150 // CHECK6-NEXT:    [[TMP69:%.*]] = icmp ne i32 [[TMP68]], 0
9151 // CHECK6-NEXT:    br i1 [[TMP69]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
9152 // CHECK6:       omp_offload.failed:
9153 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111(i64 [[TMP60]]) #[[ATTR4]]
9154 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT]]
9155 // CHECK6:       omp_offload.cont:
9156 // CHECK6-NEXT:    [[TMP70:%.*]] = load i32, i32* [[A]], align 4
9157 // CHECK6-NEXT:    [[CONV13:%.*]] = bitcast i64* [[A_CASTED12]] to i32*
9158 // CHECK6-NEXT:    store i32 [[TMP70]], i32* [[CONV13]], align 4
9159 // CHECK6-NEXT:    [[TMP71:%.*]] = load i64, i64* [[A_CASTED12]], align 8
9160 // CHECK6-NEXT:    [[TMP72:%.*]] = load i16, i16* [[AA]], align 2
9161 // CHECK6-NEXT:    [[CONV15:%.*]] = bitcast i64* [[AA_CASTED14]] to i16*
9162 // CHECK6-NEXT:    store i16 [[TMP72]], i16* [[CONV15]], align 2
9163 // CHECK6-NEXT:    [[TMP73:%.*]] = load i64, i64* [[AA_CASTED14]], align 8
9164 // CHECK6-NEXT:    [[TMP74:%.*]] = load i32, i32* [[N_ADDR]], align 4
9165 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP74]], 10
9166 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
9167 // CHECK6:       omp_if.then:
9168 // CHECK6-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
9169 // CHECK6-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
9170 // CHECK6-NEXT:    store i64 [[TMP71]], i64* [[TMP76]], align 8
9171 // CHECK6-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
9172 // CHECK6-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64*
9173 // CHECK6-NEXT:    store i64 [[TMP71]], i64* [[TMP78]], align 8
9174 // CHECK6-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0
9175 // CHECK6-NEXT:    store i8* null, i8** [[TMP79]], align 8
9176 // CHECK6-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1
9177 // CHECK6-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
9178 // CHECK6-NEXT:    store i64 [[TMP73]], i64* [[TMP81]], align 8
9179 // CHECK6-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1
9180 // CHECK6-NEXT:    [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64*
9181 // CHECK6-NEXT:    store i64 [[TMP73]], i64* [[TMP83]], align 8
9182 // CHECK6-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1
9183 // CHECK6-NEXT:    store i8* null, i8** [[TMP84]], align 8
9184 // CHECK6-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0
9185 // CHECK6-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
9186 // CHECK6-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
9187 // CHECK6-NEXT:    [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118.region_id, i32 2, i8** [[TMP85]], i8** [[TMP86]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.7, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
9188 // CHECK6-NEXT:    [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0
9189 // CHECK6-NEXT:    br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
9190 // CHECK6:       omp_offload.failed20:
9191 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i64 [[TMP71]], i64 [[TMP73]]) #[[ATTR4]]
9192 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT21]]
9193 // CHECK6:       omp_offload.cont21:
9194 // CHECK6-NEXT:    br label [[OMP_IF_END:%.*]]
9195 // CHECK6:       omp_if.else:
9196 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i64 [[TMP71]], i64 [[TMP73]]) #[[ATTR4]]
9197 // CHECK6-NEXT:    br label [[OMP_IF_END]]
9198 // CHECK6:       omp_if.end:
9199 // CHECK6-NEXT:    [[TMP89:%.*]] = load i32, i32* [[A]], align 4
9200 // CHECK6-NEXT:    [[CONV23:%.*]] = bitcast i64* [[A_CASTED22]] to i32*
9201 // CHECK6-NEXT:    store i32 [[TMP89]], i32* [[CONV23]], align 4
9202 // CHECK6-NEXT:    [[TMP90:%.*]] = load i64, i64* [[A_CASTED22]], align 8
9203 // CHECK6-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N_ADDR]], align 4
9204 // CHECK6-NEXT:    [[CMP24:%.*]] = icmp sgt i32 [[TMP91]], 20
9205 // CHECK6-NEXT:    br i1 [[CMP24]], label [[OMP_IF_THEN25:%.*]], label [[OMP_IF_ELSE32:%.*]]
9206 // CHECK6:       omp_if.then25:
9207 // CHECK6-NEXT:    [[TMP92:%.*]] = mul nuw i64 [[TMP2]], 4
9208 // CHECK6-NEXT:    [[TMP93:%.*]] = mul nuw i64 5, [[TMP5]]
9209 // CHECK6-NEXT:    [[TMP94:%.*]] = mul nuw i64 [[TMP93]], 8
9210 // CHECK6-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0
9211 // CHECK6-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i64*
9212 // CHECK6-NEXT:    store i64 [[TMP90]], i64* [[TMP96]], align 8
9213 // CHECK6-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0
9214 // CHECK6-NEXT:    [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64*
9215 // CHECK6-NEXT:    store i64 [[TMP90]], i64* [[TMP98]], align 8
9216 // CHECK6-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
9217 // CHECK6-NEXT:    store i64 4, i64* [[TMP99]], align 8
9218 // CHECK6-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 0
9219 // CHECK6-NEXT:    store i8* null, i8** [[TMP100]], align 8
9220 // CHECK6-NEXT:    [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 1
9221 // CHECK6-NEXT:    [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]**
9222 // CHECK6-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 8
9223 // CHECK6-NEXT:    [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 1
9224 // CHECK6-NEXT:    [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]**
9225 // CHECK6-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8
9226 // CHECK6-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
9227 // CHECK6-NEXT:    store i64 40, i64* [[TMP105]], align 8
9228 // CHECK6-NEXT:    [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 1
9229 // CHECK6-NEXT:    store i8* null, i8** [[TMP106]], align 8
9230 // CHECK6-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 2
9231 // CHECK6-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64*
9232 // CHECK6-NEXT:    store i64 [[TMP2]], i64* [[TMP108]], align 8
9233 // CHECK6-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 2
9234 // CHECK6-NEXT:    [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i64*
9235 // CHECK6-NEXT:    store i64 [[TMP2]], i64* [[TMP110]], align 8
9236 // CHECK6-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
9237 // CHECK6-NEXT:    store i64 8, i64* [[TMP111]], align 8
9238 // CHECK6-NEXT:    [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 2
9239 // CHECK6-NEXT:    store i8* null, i8** [[TMP112]], align 8
9240 // CHECK6-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 3
9241 // CHECK6-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float**
9242 // CHECK6-NEXT:    store float* [[VLA]], float** [[TMP114]], align 8
9243 // CHECK6-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 3
9244 // CHECK6-NEXT:    [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float**
9245 // CHECK6-NEXT:    store float* [[VLA]], float** [[TMP116]], align 8
9246 // CHECK6-NEXT:    [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
9247 // CHECK6-NEXT:    store i64 [[TMP92]], i64* [[TMP117]], align 8
9248 // CHECK6-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 3
9249 // CHECK6-NEXT:    store i8* null, i8** [[TMP118]], align 8
9250 // CHECK6-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 4
9251 // CHECK6-NEXT:    [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]**
9252 // CHECK6-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 8
9253 // CHECK6-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 4
9254 // CHECK6-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]**
9255 // CHECK6-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 8
9256 // CHECK6-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
9257 // CHECK6-NEXT:    store i64 400, i64* [[TMP123]], align 8
9258 // CHECK6-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 4
9259 // CHECK6-NEXT:    store i8* null, i8** [[TMP124]], align 8
9260 // CHECK6-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 5
9261 // CHECK6-NEXT:    [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64*
9262 // CHECK6-NEXT:    store i64 5, i64* [[TMP126]], align 8
9263 // CHECK6-NEXT:    [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 5
9264 // CHECK6-NEXT:    [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64*
9265 // CHECK6-NEXT:    store i64 5, i64* [[TMP128]], align 8
9266 // CHECK6-NEXT:    [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
9267 // CHECK6-NEXT:    store i64 8, i64* [[TMP129]], align 8
9268 // CHECK6-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 5
9269 // CHECK6-NEXT:    store i8* null, i8** [[TMP130]], align 8
9270 // CHECK6-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 6
9271 // CHECK6-NEXT:    [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64*
9272 // CHECK6-NEXT:    store i64 [[TMP5]], i64* [[TMP132]], align 8
9273 // CHECK6-NEXT:    [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 6
9274 // CHECK6-NEXT:    [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i64*
9275 // CHECK6-NEXT:    store i64 [[TMP5]], i64* [[TMP134]], align 8
9276 // CHECK6-NEXT:    [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
9277 // CHECK6-NEXT:    store i64 8, i64* [[TMP135]], align 8
9278 // CHECK6-NEXT:    [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 6
9279 // CHECK6-NEXT:    store i8* null, i8** [[TMP136]], align 8
9280 // CHECK6-NEXT:    [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 7
9281 // CHECK6-NEXT:    [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double**
9282 // CHECK6-NEXT:    store double* [[VLA1]], double** [[TMP138]], align 8
9283 // CHECK6-NEXT:    [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 7
9284 // CHECK6-NEXT:    [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double**
9285 // CHECK6-NEXT:    store double* [[VLA1]], double** [[TMP140]], align 8
9286 // CHECK6-NEXT:    [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
9287 // CHECK6-NEXT:    store i64 [[TMP94]], i64* [[TMP141]], align 8
9288 // CHECK6-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 7
9289 // CHECK6-NEXT:    store i8* null, i8** [[TMP142]], align 8
9290 // CHECK6-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 8
9291 // CHECK6-NEXT:    [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT**
9292 // CHECK6-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 8
9293 // CHECK6-NEXT:    [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 8
9294 // CHECK6-NEXT:    [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT**
9295 // CHECK6-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 8
9296 // CHECK6-NEXT:    [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
9297 // CHECK6-NEXT:    store i64 16, i64* [[TMP147]], align 8
9298 // CHECK6-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 8
9299 // CHECK6-NEXT:    store i8* null, i8** [[TMP148]], align 8
9300 // CHECK6-NEXT:    [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0
9301 // CHECK6-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0
9302 // CHECK6-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
9303 // CHECK6-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
9304 // CHECK6-NEXT:    [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
9305 // CHECK6-NEXT:    [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0
9306 // CHECK6-NEXT:    br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
9307 // CHECK6:       omp_offload.failed30:
9308 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
9309 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
9310 // CHECK6:       omp_offload.cont31:
9311 // CHECK6-NEXT:    br label [[OMP_IF_END33:%.*]]
9312 // CHECK6:       omp_if.else32:
9313 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
9314 // CHECK6-NEXT:    br label [[OMP_IF_END33]]
9315 // CHECK6:       omp_if.end33:
9316 // CHECK6-NEXT:    [[TMP154:%.*]] = load i32, i32* [[A]], align 4
9317 // CHECK6-NEXT:    [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
9318 // CHECK6-NEXT:    call void @llvm.stackrestore(i8* [[TMP155]])
9319 // CHECK6-NEXT:    ret i32 [[TMP154]]
9320 //
9321 //
9322 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
9323 // CHECK6-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2:[0-9]+]] {
9324 // CHECK6-NEXT:  entry:
9325 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
9326 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
9327 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8
9328 // CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
9329 // CHECK6-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
9330 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
9331 // CHECK6-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
9332 // CHECK6-NEXT:    store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8
9333 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
9334 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
9335 // CHECK6-NEXT:    [[CONV4:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32*
9336 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV3]], align 4
9337 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV4]], align 4
9338 // CHECK6-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
9339 // CHECK6-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
9340 // CHECK6-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
9341 // CHECK6-NEXT:    store i16 [[TMP3]], i16* [[CONV5]], align 2
9342 // CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
9343 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP4]])
9344 // CHECK6-NEXT:    ret void
9345 //
9346 //
9347 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined.
9348 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3:[0-9]+]] {
9349 // CHECK6-NEXT:  entry:
9350 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9351 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9352 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
9353 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9354 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9355 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9356 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9357 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9358 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9359 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
9360 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9361 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9362 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
9363 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
9364 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9365 // CHECK6-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
9366 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9367 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9368 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9369 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
9370 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9371 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9372 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
9373 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9374 // CHECK6:       cond.true:
9375 // CHECK6-NEXT:    br label [[COND_END:%.*]]
9376 // CHECK6:       cond.false:
9377 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9378 // CHECK6-NEXT:    br label [[COND_END]]
9379 // CHECK6:       cond.end:
9380 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
9381 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
9382 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9383 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
9384 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9385 // CHECK6:       omp.inner.for.cond:
9386 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
9387 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
9388 // CHECK6-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
9389 // CHECK6-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9390 // CHECK6:       omp.inner.for.body:
9391 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
9392 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
9393 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9394 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10
9395 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9396 // CHECK6:       omp.body.continue:
9397 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9398 // CHECK6:       omp.inner.for.inc:
9399 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
9400 // CHECK6-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
9401 // CHECK6-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
9402 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
9403 // CHECK6:       omp.inner.for.end:
9404 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9405 // CHECK6:       omp.loop.exit:
9406 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
9407 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9408 // CHECK6-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
9409 // CHECK6-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9410 // CHECK6:       .omp.final.then:
9411 // CHECK6-NEXT:    store i32 10, i32* [[I]], align 4
9412 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9413 // CHECK6:       .omp.final.done:
9414 // CHECK6-NEXT:    ret void
9415 //
9416 //
9417 // CHECK6-LABEL: define {{[^@]+}}@.omp_task_privates_map.
9418 // CHECK6-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], i16** noalias noundef [[TMP1:%.*]], [3 x i8*]** noalias noundef [[TMP2:%.*]], [3 x i8*]** noalias noundef [[TMP3:%.*]], [3 x i64]** noalias noundef [[TMP4:%.*]]) #[[ATTR5:[0-9]+]] {
9419 // CHECK6-NEXT:  entry:
9420 // CHECK6-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
9421 // CHECK6-NEXT:    [[DOTADDR1:%.*]] = alloca i16**, align 8
9422 // CHECK6-NEXT:    [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 8
9423 // CHECK6-NEXT:    [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 8
9424 // CHECK6-NEXT:    [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 8
9425 // CHECK6-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
9426 // CHECK6-NEXT:    store i16** [[TMP1]], i16*** [[DOTADDR1]], align 8
9427 // CHECK6-NEXT:    store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 8
9428 // CHECK6-NEXT:    store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 8
9429 // CHECK6-NEXT:    store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 8
9430 // CHECK6-NEXT:    [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
9431 // CHECK6-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0
9432 // CHECK6-NEXT:    [[TMP7:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 8
9433 // CHECK6-NEXT:    store [3 x i8*]* [[TMP6]], [3 x i8*]** [[TMP7]], align 8
9434 // CHECK6-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1
9435 // CHECK6-NEXT:    [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 8
9436 // CHECK6-NEXT:    store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 8
9437 // CHECK6-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2
9438 // CHECK6-NEXT:    [[TMP11:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 8
9439 // CHECK6-NEXT:    store [3 x i64]* [[TMP10]], [3 x i64]** [[TMP11]], align 8
9440 // CHECK6-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3
9441 // CHECK6-NEXT:    [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 8
9442 // CHECK6-NEXT:    store i16* [[TMP12]], i16** [[TMP13]], align 8
9443 // CHECK6-NEXT:    ret void
9444 //
9445 //
9446 // CHECK6-LABEL: define {{[^@]+}}@.omp_task_entry.
9447 // CHECK6-SAME: (i32 noundef signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR6:[0-9]+]] {
9448 // CHECK6-NEXT:  entry:
9449 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
9450 // CHECK6-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
9451 // CHECK6-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
9452 // CHECK6-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
9453 // CHECK6-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
9454 // CHECK6-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
9455 // CHECK6-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 8
9456 // CHECK6-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 8
9457 // CHECK6-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 8
9458 // CHECK6-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 8
9459 // CHECK6-NEXT:    [[AA_CASTED_I:%.*]] = alloca i64, align 8
9460 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR__CASTED_I:%.*]] = alloca i64, align 8
9461 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR__CASTED5_I:%.*]] = alloca i64, align 8
9462 // CHECK6-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
9463 // CHECK6-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
9464 // CHECK6-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
9465 // CHECK6-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
9466 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
9467 // CHECK6-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
9468 // CHECK6-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
9469 // CHECK6-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
9470 // CHECK6-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
9471 // CHECK6-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
9472 // CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
9473 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
9474 // CHECK6-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
9475 // CHECK6-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
9476 // CHECK6-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
9477 // CHECK6-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
9478 // CHECK6-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
9479 // CHECK6-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]])
9480 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26
9481 // CHECK6-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !26
9482 // CHECK6-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26
9483 // CHECK6-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26
9484 // CHECK6-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !26
9485 // CHECK6-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !26
9486 // CHECK6-NEXT:    [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !26
9487 // CHECK6-NEXT:    [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26
9488 // CHECK6-NEXT:    [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26
9489 // CHECK6-NEXT:    [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)*
9490 // CHECK6-NEXT:    call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
9491 // CHECK6-NEXT:    [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !26
9492 // CHECK6-NEXT:    [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !26
9493 // CHECK6-NEXT:    [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !26
9494 // CHECK6-NEXT:    [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !26
9495 // CHECK6-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i64 0, i64 0
9496 // CHECK6-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i64 0, i64 0
9497 // CHECK6-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i64 0, i64 0
9498 // CHECK6-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
9499 // CHECK6-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
9500 // CHECK6-NEXT:    [[TMP25:%.*]] = load i32, i32* [[TMP23]], align 4
9501 // CHECK6-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) #[[ATTR4]]
9502 // CHECK6-NEXT:    [[TMP26:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 [[TMP25]], i32 1, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
9503 // CHECK6-NEXT:    [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
9504 // CHECK6-NEXT:    br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
9505 // CHECK6:       omp_offload.failed.i:
9506 // CHECK6-NEXT:    [[TMP28:%.*]] = load i16, i16* [[TMP16]], align 2
9507 // CHECK6-NEXT:    [[CONV_I:%.*]] = bitcast i64* [[AA_CASTED_I]] to i16*
9508 // CHECK6-NEXT:    store i16 [[TMP28]], i16* [[CONV_I]], align 2, !noalias !26
9509 // CHECK6-NEXT:    [[TMP29:%.*]] = load i64, i64* [[AA_CASTED_I]], align 8, !noalias !26
9510 // CHECK6-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP23]], align 4
9511 // CHECK6-NEXT:    [[CONV4_I:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED_I]] to i32*
9512 // CHECK6-NEXT:    store i32 [[TMP30]], i32* [[CONV4_I]], align 4, !noalias !26
9513 // CHECK6-NEXT:    [[TMP31:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED_I]], align 8, !noalias !26
9514 // CHECK6-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP24]], align 4
9515 // CHECK6-NEXT:    [[CONV6_I:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED5_I]] to i32*
9516 // CHECK6-NEXT:    store i32 [[TMP32]], i32* [[CONV6_I]], align 4, !noalias !26
9517 // CHECK6-NEXT:    [[TMP33:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED5_I]], align 8, !noalias !26
9518 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97(i64 [[TMP29]], i64 [[TMP31]], i64 [[TMP33]]) #[[ATTR4]]
9519 // CHECK6-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
9520 // CHECK6:       .omp_outlined..1.exit:
9521 // CHECK6-NEXT:    ret i32 0
9522 //
9523 //
9524 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l102
9525 // CHECK6-SAME: (i64 noundef [[A:%.*]]) #[[ATTR3]] {
9526 // CHECK6-NEXT:  entry:
9527 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
9528 // CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
9529 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
9530 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
9531 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
9532 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
9533 // CHECK6-NEXT:    store i32 [[TMP0]], i32* [[CONV1]], align 4
9534 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
9535 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]])
9536 // CHECK6-NEXT:    ret void
9537 //
9538 //
9539 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..2
9540 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
9541 // CHECK6-NEXT:  entry:
9542 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9543 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9544 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
9545 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9546 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9547 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9548 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9549 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9550 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9551 // CHECK6-NEXT:    [[A1:%.*]] = alloca i32, align 4
9552 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9553 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9554 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
9555 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
9556 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9557 // CHECK6-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
9558 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9559 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9560 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9561 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
9562 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9563 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9564 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
9565 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9566 // CHECK6:       cond.true:
9567 // CHECK6-NEXT:    br label [[COND_END:%.*]]
9568 // CHECK6:       cond.false:
9569 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9570 // CHECK6-NEXT:    br label [[COND_END]]
9571 // CHECK6:       cond.end:
9572 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
9573 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
9574 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9575 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
9576 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9577 // CHECK6:       omp.inner.for.cond:
9578 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9579 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9580 // CHECK6-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
9581 // CHECK6-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9582 // CHECK6:       omp.inner.for.body:
9583 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9584 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
9585 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9586 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[A1]], align 4, !nontemporal !27
9587 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A1]], align 4, !nontemporal !27
9588 // CHECK6-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
9589 // CHECK6-NEXT:    store i32 [[ADD3]], i32* [[A1]], align 4, !nontemporal !27
9590 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9591 // CHECK6:       omp.body.continue:
9592 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9593 // CHECK6:       omp.inner.for.inc:
9594 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
9595 // CHECK6-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1
9596 // CHECK6-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
9597 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
9598 // CHECK6:       omp.inner.for.end:
9599 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9600 // CHECK6:       omp.loop.exit:
9601 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
9602 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9603 // CHECK6-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
9604 // CHECK6-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9605 // CHECK6:       .omp.final.then:
9606 // CHECK6-NEXT:    store i32 10, i32* [[CONV]], align 4
9607 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9608 // CHECK6:       .omp.final.done:
9609 // CHECK6-NEXT:    ret void
9610 //
9611 //
9612 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
9613 // CHECK6-SAME: (i64 noundef [[AA:%.*]]) #[[ATTR2]] {
9614 // CHECK6-NEXT:  entry:
9615 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
9616 // CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
9617 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
9618 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
9619 // CHECK6-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
9620 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
9621 // CHECK6-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
9622 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
9623 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]])
9624 // CHECK6-NEXT:    ret void
9625 //
9626 //
9627 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..3
9628 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3]] {
9629 // CHECK6-NEXT:  entry:
9630 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9631 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9632 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
9633 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9634 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9635 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9636 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9637 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9638 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9639 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
9640 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9641 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9642 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
9643 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
9644 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9645 // CHECK6-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
9646 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9647 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9648 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9649 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
9650 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9651 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9652 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
9653 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9654 // CHECK6:       cond.true:
9655 // CHECK6-NEXT:    br label [[COND_END:%.*]]
9656 // CHECK6:       cond.false:
9657 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9658 // CHECK6-NEXT:    br label [[COND_END]]
9659 // CHECK6:       cond.end:
9660 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
9661 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
9662 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9663 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
9664 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9665 // CHECK6:       omp.inner.for.cond:
9666 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
9667 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !30
9668 // CHECK6-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
9669 // CHECK6-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9670 // CHECK6:       omp.inner.for.body:
9671 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
9672 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
9673 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9674 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !30
9675 // CHECK6-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !30
9676 // CHECK6-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
9677 // CHECK6-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
9678 // CHECK6-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
9679 // CHECK6-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !30
9680 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9681 // CHECK6:       omp.body.continue:
9682 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9683 // CHECK6:       omp.inner.for.inc:
9684 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
9685 // CHECK6-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
9686 // CHECK6-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
9687 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
9688 // CHECK6:       omp.inner.for.end:
9689 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9690 // CHECK6:       omp.loop.exit:
9691 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
9692 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9693 // CHECK6-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
9694 // CHECK6-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9695 // CHECK6:       .omp.final.then:
9696 // CHECK6-NEXT:    store i32 10, i32* [[I]], align 4
9697 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9698 // CHECK6:       .omp.final.done:
9699 // CHECK6-NEXT:    ret void
9700 //
9701 //
9702 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
9703 // CHECK6-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR2]] {
9704 // CHECK6-NEXT:  entry:
9705 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
9706 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
9707 // CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
9708 // CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
9709 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
9710 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
9711 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
9712 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
9713 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
9714 // CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
9715 // CHECK6-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
9716 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
9717 // CHECK6-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
9718 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
9719 // CHECK6-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
9720 // CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
9721 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
9722 // CHECK6-NEXT:    ret void
9723 //
9724 //
9725 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..6
9726 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3]] {
9727 // CHECK6-NEXT:  entry:
9728 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9729 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9730 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
9731 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
9732 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9733 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9734 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9735 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9736 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9737 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9738 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
9739 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9740 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9741 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
9742 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
9743 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
9744 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
9745 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9746 // CHECK6-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
9747 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9748 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9749 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9750 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
9751 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9752 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9753 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
9754 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9755 // CHECK6:       cond.true:
9756 // CHECK6-NEXT:    br label [[COND_END:%.*]]
9757 // CHECK6:       cond.false:
9758 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9759 // CHECK6-NEXT:    br label [[COND_END]]
9760 // CHECK6:       cond.end:
9761 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
9762 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
9763 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9764 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
9765 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9766 // CHECK6:       omp.inner.for.cond:
9767 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
9768 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
9769 // CHECK6-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
9770 // CHECK6-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9771 // CHECK6:       omp.inner.for.body:
9772 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
9773 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
9774 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9775 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !33
9776 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !33
9777 // CHECK6-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
9778 // CHECK6-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !33
9779 // CHECK6-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !33
9780 // CHECK6-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
9781 // CHECK6-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
9782 // CHECK6-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
9783 // CHECK6-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !33
9784 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9785 // CHECK6:       omp.body.continue:
9786 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9787 // CHECK6:       omp.inner.for.inc:
9788 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
9789 // CHECK6-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
9790 // CHECK6-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
9791 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
9792 // CHECK6:       omp.inner.for.end:
9793 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9794 // CHECK6:       omp.loop.exit:
9795 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
9796 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9797 // CHECK6-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
9798 // CHECK6-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9799 // CHECK6:       .omp.final.then:
9800 // CHECK6-NEXT:    store i32 10, i32* [[I]], align 4
9801 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9802 // CHECK6:       .omp.final.done:
9803 // CHECK6-NEXT:    ret void
9804 //
9805 //
9806 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
9807 // CHECK6-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR2]] {
9808 // CHECK6-NEXT:  entry:
9809 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
9810 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
9811 // CHECK6-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
9812 // CHECK6-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
9813 // CHECK6-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
9814 // CHECK6-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
9815 // CHECK6-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
9816 // CHECK6-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
9817 // CHECK6-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
9818 // CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
9819 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
9820 // CHECK6-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
9821 // CHECK6-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
9822 // CHECK6-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
9823 // CHECK6-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
9824 // CHECK6-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
9825 // CHECK6-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
9826 // CHECK6-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
9827 // CHECK6-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
9828 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
9829 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
9830 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
9831 // CHECK6-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
9832 // CHECK6-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
9833 // CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
9834 // CHECK6-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
9835 // CHECK6-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
9836 // CHECK6-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
9837 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
9838 // CHECK6-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
9839 // CHECK6-NEXT:    store i32 [[TMP8]], i32* [[CONV5]], align 4
9840 // CHECK6-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
9841 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
9842 // CHECK6-NEXT:    ret void
9843 //
9844 //
9845 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..9
9846 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR3]] {
9847 // CHECK6-NEXT:  entry:
9848 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9849 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9850 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
9851 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
9852 // CHECK6-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
9853 // CHECK6-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
9854 // CHECK6-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
9855 // CHECK6-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
9856 // CHECK6-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
9857 // CHECK6-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
9858 // CHECK6-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
9859 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9860 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9861 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9862 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9863 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9864 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9865 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
9866 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9867 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9868 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
9869 // CHECK6-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
9870 // CHECK6-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
9871 // CHECK6-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
9872 // CHECK6-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
9873 // CHECK6-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
9874 // CHECK6-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
9875 // CHECK6-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
9876 // CHECK6-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
9877 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
9878 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
9879 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
9880 // CHECK6-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
9881 // CHECK6-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
9882 // CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
9883 // CHECK6-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
9884 // CHECK6-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
9885 // CHECK6-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
9886 // CHECK6-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 0
9887 // CHECK6-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
9888 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9889 // CHECK6-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
9890 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9891 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9892 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9893 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
9894 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9895 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9896 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
9897 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9898 // CHECK6:       cond.true:
9899 // CHECK6-NEXT:    br label [[COND_END:%.*]]
9900 // CHECK6:       cond.false:
9901 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9902 // CHECK6-NEXT:    br label [[COND_END]]
9903 // CHECK6:       cond.end:
9904 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
9905 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
9906 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9907 // CHECK6-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
9908 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9909 // CHECK6:       omp.inner.for.cond:
9910 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
9911 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
9912 // CHECK6-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
9913 // CHECK6-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9914 // CHECK6:       omp.inner.for.body:
9915 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
9916 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
9917 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9918 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !36
9919 // CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !36
9920 // CHECK6-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
9921 // CHECK6-NEXT:    store i32 [[ADD6]], i32* [[CONV]], align 4, !llvm.access.group !36
9922 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
9923 // CHECK6-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !36
9924 // CHECK6-NEXT:    [[CONV7:%.*]] = fpext float [[TMP17]] to double
9925 // CHECK6-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
9926 // CHECK6-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
9927 // CHECK6-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !36
9928 // CHECK6-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
9929 // CHECK6-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
9930 // CHECK6-NEXT:    [[CONV11:%.*]] = fpext float [[TMP18]] to double
9931 // CHECK6-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
9932 // CHECK6-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
9933 // CHECK6-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
9934 // CHECK6-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
9935 // CHECK6-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i64 0, i64 2
9936 // CHECK6-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
9937 // CHECK6-NEXT:    [[ADD16:%.*]] = fadd double [[TMP19]], 1.000000e+00
9938 // CHECK6-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
9939 // CHECK6-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP5]]
9940 // CHECK6-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP20]]
9941 // CHECK6-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i64 3
9942 // CHECK6-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
9943 // CHECK6-NEXT:    [[ADD19:%.*]] = fadd double [[TMP21]], 1.000000e+00
9944 // CHECK6-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
9945 // CHECK6-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
9946 // CHECK6-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !36
9947 // CHECK6-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP22]], 1
9948 // CHECK6-NEXT:    store i64 [[ADD20]], i64* [[X]], align 8, !llvm.access.group !36
9949 // CHECK6-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
9950 // CHECK6-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !36
9951 // CHECK6-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP23]] to i32
9952 // CHECK6-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
9953 // CHECK6-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
9954 // CHECK6-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 8, !llvm.access.group !36
9955 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9956 // CHECK6:       omp.body.continue:
9957 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9958 // CHECK6:       omp.inner.for.inc:
9959 // CHECK6-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
9960 // CHECK6-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP24]], 1
9961 // CHECK6-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
9962 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
9963 // CHECK6:       omp.inner.for.end:
9964 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9965 // CHECK6:       omp.loop.exit:
9966 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
9967 // CHECK6-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9968 // CHECK6-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
9969 // CHECK6-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9970 // CHECK6:       .omp.final.then:
9971 // CHECK6-NEXT:    store i32 10, i32* [[I]], align 4
9972 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9973 // CHECK6:       .omp.final.done:
9974 // CHECK6-NEXT:    ret void
9975 //
9976 //
9977 // CHECK6-LABEL: define {{[^@]+}}@_Z3bari
9978 // CHECK6-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
9979 // CHECK6-NEXT:  entry:
9980 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
9981 // CHECK6-NEXT:    [[A:%.*]] = alloca i32, align 4
9982 // CHECK6-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
9983 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
9984 // CHECK6-NEXT:    store i32 0, i32* [[A]], align 4
9985 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
9986 // CHECK6-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
9987 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
9988 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
9989 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
9990 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
9991 // CHECK6-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef signext [[TMP2]])
9992 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
9993 // CHECK6-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
9994 // CHECK6-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
9995 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
9996 // CHECK6-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
9997 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
9998 // CHECK6-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
9999 // CHECK6-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
10000 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
10001 // CHECK6-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
10002 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
10003 // CHECK6-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
10004 // CHECK6-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
10005 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
10006 // CHECK6-NEXT:    ret i32 [[TMP8]]
10007 //
10008 //
10009 // CHECK6-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
10010 // CHECK6-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
10011 // CHECK6-NEXT:  entry:
10012 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
10013 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
10014 // CHECK6-NEXT:    [[B:%.*]] = alloca i32, align 4
10015 // CHECK6-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
10016 // CHECK6-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
10017 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
10018 // CHECK6-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
10019 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10020 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8
10021 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8
10022 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8
10023 // CHECK6-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 8
10024 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10025 // CHECK6-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
10026 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
10027 // CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
10028 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
10029 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
10030 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
10031 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
10032 // CHECK6-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
10033 // CHECK6-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
10034 // CHECK6-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
10035 // CHECK6-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
10036 // CHECK6-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
10037 // CHECK6-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
10038 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
10039 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
10040 // CHECK6-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
10041 // CHECK6-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
10042 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B]], align 4
10043 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
10044 // CHECK6-NEXT:    store i32 [[TMP6]], i32* [[CONV]], align 4
10045 // CHECK6-NEXT:    [[TMP7:%.*]] = load i64, i64* [[B_CASTED]], align 8
10046 // CHECK6-NEXT:    [[TMP8:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
10047 // CHECK6-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
10048 // CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
10049 // CHECK6-NEXT:    [[FROMBOOL3:%.*]] = zext i1 [[TOBOOL]] to i8
10050 // CHECK6-NEXT:    store i8 [[FROMBOOL3]], i8* [[CONV2]], align 1
10051 // CHECK6-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
10052 // CHECK6-NEXT:    [[TMP10:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
10053 // CHECK6-NEXT:    [[TOBOOL4:%.*]] = trunc i8 [[TMP10]] to i1
10054 // CHECK6-NEXT:    br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
10055 // CHECK6:       omp_if.then:
10056 // CHECK6-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
10057 // CHECK6-NEXT:    [[TMP11:%.*]] = mul nuw i64 2, [[TMP2]]
10058 // CHECK6-NEXT:    [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
10059 // CHECK6-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10060 // CHECK6-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.S1**
10061 // CHECK6-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP14]], align 8
10062 // CHECK6-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10063 // CHECK6-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
10064 // CHECK6-NEXT:    store double* [[A]], double** [[TMP16]], align 8
10065 // CHECK6-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
10066 // CHECK6-NEXT:    store i64 8, i64* [[TMP17]], align 8
10067 // CHECK6-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
10068 // CHECK6-NEXT:    store i8* null, i8** [[TMP18]], align 8
10069 // CHECK6-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
10070 // CHECK6-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
10071 // CHECK6-NEXT:    store i64 [[TMP7]], i64* [[TMP20]], align 8
10072 // CHECK6-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
10073 // CHECK6-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
10074 // CHECK6-NEXT:    store i64 [[TMP7]], i64* [[TMP22]], align 8
10075 // CHECK6-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
10076 // CHECK6-NEXT:    store i64 4, i64* [[TMP23]], align 8
10077 // CHECK6-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
10078 // CHECK6-NEXT:    store i8* null, i8** [[TMP24]], align 8
10079 // CHECK6-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
10080 // CHECK6-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64*
10081 // CHECK6-NEXT:    store i64 2, i64* [[TMP26]], align 8
10082 // CHECK6-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
10083 // CHECK6-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64*
10084 // CHECK6-NEXT:    store i64 2, i64* [[TMP28]], align 8
10085 // CHECK6-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
10086 // CHECK6-NEXT:    store i64 8, i64* [[TMP29]], align 8
10087 // CHECK6-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
10088 // CHECK6-NEXT:    store i8* null, i8** [[TMP30]], align 8
10089 // CHECK6-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
10090 // CHECK6-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
10091 // CHECK6-NEXT:    store i64 [[TMP2]], i64* [[TMP32]], align 8
10092 // CHECK6-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
10093 // CHECK6-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64*
10094 // CHECK6-NEXT:    store i64 [[TMP2]], i64* [[TMP34]], align 8
10095 // CHECK6-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
10096 // CHECK6-NEXT:    store i64 8, i64* [[TMP35]], align 8
10097 // CHECK6-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
10098 // CHECK6-NEXT:    store i8* null, i8** [[TMP36]], align 8
10099 // CHECK6-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
10100 // CHECK6-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16**
10101 // CHECK6-NEXT:    store i16* [[VLA]], i16** [[TMP38]], align 8
10102 // CHECK6-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
10103 // CHECK6-NEXT:    [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16**
10104 // CHECK6-NEXT:    store i16* [[VLA]], i16** [[TMP40]], align 8
10105 // CHECK6-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
10106 // CHECK6-NEXT:    store i64 [[TMP12]], i64* [[TMP41]], align 8
10107 // CHECK6-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
10108 // CHECK6-NEXT:    store i8* null, i8** [[TMP42]], align 8
10109 // CHECK6-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
10110 // CHECK6-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i64*
10111 // CHECK6-NEXT:    store i64 [[TMP9]], i64* [[TMP44]], align 8
10112 // CHECK6-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
10113 // CHECK6-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64*
10114 // CHECK6-NEXT:    store i64 [[TMP9]], i64* [[TMP46]], align 8
10115 // CHECK6-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
10116 // CHECK6-NEXT:    store i64 1, i64* [[TMP47]], align 8
10117 // CHECK6-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
10118 // CHECK6-NEXT:    store i8* null, i8** [[TMP48]], align 8
10119 // CHECK6-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10120 // CHECK6-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10121 // CHECK6-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
10122 // CHECK6-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
10123 // CHECK6-NEXT:    [[TMP52:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
10124 // CHECK6-NEXT:    [[TMP53:%.*]] = icmp ne i32 [[TMP52]], 0
10125 // CHECK6-NEXT:    br i1 [[TMP53]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
10126 // CHECK6:       omp_offload.failed:
10127 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]]
10128 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT]]
10129 // CHECK6:       omp_offload.cont:
10130 // CHECK6-NEXT:    br label [[OMP_IF_END:%.*]]
10131 // CHECK6:       omp_if.else:
10132 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]]
10133 // CHECK6-NEXT:    br label [[OMP_IF_END]]
10134 // CHECK6:       omp_if.end:
10135 // CHECK6-NEXT:    [[TMP54:%.*]] = mul nsw i64 1, [[TMP2]]
10136 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP54]]
10137 // CHECK6-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
10138 // CHECK6-NEXT:    [[TMP55:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2
10139 // CHECK6-NEXT:    [[CONV6:%.*]] = sext i16 [[TMP55]] to i32
10140 // CHECK6-NEXT:    [[TMP56:%.*]] = load i32, i32* [[B]], align 4
10141 // CHECK6-NEXT:    [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP56]]
10142 // CHECK6-NEXT:    [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
10143 // CHECK6-NEXT:    call void @llvm.stackrestore(i8* [[TMP57]])
10144 // CHECK6-NEXT:    ret i32 [[ADD7]]
10145 //
10146 //
10147 // CHECK6-LABEL: define {{[^@]+}}@_ZL7fstatici
10148 // CHECK6-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
10149 // CHECK6-NEXT:  entry:
10150 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
10151 // CHECK6-NEXT:    [[A:%.*]] = alloca i32, align 4
10152 // CHECK6-NEXT:    [[AA:%.*]] = alloca i16, align 2
10153 // CHECK6-NEXT:    [[AAA:%.*]] = alloca i8, align 1
10154 // CHECK6-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
10155 // CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
10156 // CHECK6-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
10157 // CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
10158 // CHECK6-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
10159 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
10160 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
10161 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
10162 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10163 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10164 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
10165 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
10166 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
10167 // CHECK6-NEXT:    store i32 0, i32* [[A]], align 4
10168 // CHECK6-NEXT:    store i16 0, i16* [[AA]], align 2
10169 // CHECK6-NEXT:    store i8 0, i8* [[AAA]], align 1
10170 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
10171 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
10172 // CHECK6-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
10173 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
10174 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
10175 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
10176 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[CONV1]], align 4
10177 // CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
10178 // CHECK6-NEXT:    [[TMP4:%.*]] = load i16, i16* [[AA]], align 2
10179 // CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
10180 // CHECK6-NEXT:    store i16 [[TMP4]], i16* [[CONV2]], align 2
10181 // CHECK6-NEXT:    [[TMP5:%.*]] = load i64, i64* [[AA_CASTED]], align 8
10182 // CHECK6-NEXT:    [[TMP6:%.*]] = load i8, i8* [[AAA]], align 1
10183 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
10184 // CHECK6-NEXT:    store i8 [[TMP6]], i8* [[CONV3]], align 1
10185 // CHECK6-NEXT:    [[TMP7:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
10186 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[N_ADDR]], align 4
10187 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 50
10188 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
10189 // CHECK6:       omp_if.then:
10190 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10191 // CHECK6-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
10192 // CHECK6-NEXT:    store i64 [[TMP1]], i64* [[TMP10]], align 8
10193 // CHECK6-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10194 // CHECK6-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64*
10195 // CHECK6-NEXT:    store i64 [[TMP1]], i64* [[TMP12]], align 8
10196 // CHECK6-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
10197 // CHECK6-NEXT:    store i8* null, i8** [[TMP13]], align 8
10198 // CHECK6-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
10199 // CHECK6-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
10200 // CHECK6-NEXT:    store i64 [[TMP3]], i64* [[TMP15]], align 8
10201 // CHECK6-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
10202 // CHECK6-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
10203 // CHECK6-NEXT:    store i64 [[TMP3]], i64* [[TMP17]], align 8
10204 // CHECK6-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
10205 // CHECK6-NEXT:    store i8* null, i8** [[TMP18]], align 8
10206 // CHECK6-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
10207 // CHECK6-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
10208 // CHECK6-NEXT:    store i64 [[TMP5]], i64* [[TMP20]], align 8
10209 // CHECK6-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
10210 // CHECK6-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
10211 // CHECK6-NEXT:    store i64 [[TMP5]], i64* [[TMP22]], align 8
10212 // CHECK6-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
10213 // CHECK6-NEXT:    store i8* null, i8** [[TMP23]], align 8
10214 // CHECK6-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
10215 // CHECK6-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
10216 // CHECK6-NEXT:    store i64 [[TMP7]], i64* [[TMP25]], align 8
10217 // CHECK6-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
10218 // CHECK6-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
10219 // CHECK6-NEXT:    store i64 [[TMP7]], i64* [[TMP27]], align 8
10220 // CHECK6-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
10221 // CHECK6-NEXT:    store i8* null, i8** [[TMP28]], align 8
10222 // CHECK6-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
10223 // CHECK6-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [10 x i32]**
10224 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP30]], align 8
10225 // CHECK6-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
10226 // CHECK6-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to [10 x i32]**
10227 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP32]], align 8
10228 // CHECK6-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
10229 // CHECK6-NEXT:    store i8* null, i8** [[TMP33]], align 8
10230 // CHECK6-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10231 // CHECK6-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10232 // CHECK6-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A]], align 4
10233 // CHECK6-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_]], align 4
10234 // CHECK6-NEXT:    [[TMP37:%.*]] = load i32, i32* [[N_ADDR]], align 4
10235 // CHECK6-NEXT:    store i32 [[TMP37]], i32* [[DOTCAPTURE_EXPR_4]], align 4
10236 // CHECK6-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
10237 // CHECK6-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10238 // CHECK6-NEXT:    [[SUB:%.*]] = sub i32 [[TMP38]], [[TMP39]]
10239 // CHECK6-NEXT:    [[SUB6:%.*]] = sub i32 [[SUB]], 1
10240 // CHECK6-NEXT:    [[ADD:%.*]] = add i32 [[SUB6]], 1
10241 // CHECK6-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
10242 // CHECK6-NEXT:    [[SUB7:%.*]] = sub i32 [[DIV]], 1
10243 // CHECK6-NEXT:    store i32 [[SUB7]], i32* [[DOTCAPTURE_EXPR_5]], align 4
10244 // CHECK6-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
10245 // CHECK6-NEXT:    [[ADD8:%.*]] = add i32 [[TMP40]], 1
10246 // CHECK6-NEXT:    [[TMP41:%.*]] = zext i32 [[ADD8]] to i64
10247 // CHECK6-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]])
10248 // CHECK6-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
10249 // CHECK6-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
10250 // CHECK6-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
10251 // CHECK6:       omp_offload.failed:
10252 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], i64 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
10253 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT]]
10254 // CHECK6:       omp_offload.cont:
10255 // CHECK6-NEXT:    br label [[OMP_IF_END:%.*]]
10256 // CHECK6:       omp_if.else:
10257 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], i64 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
10258 // CHECK6-NEXT:    br label [[OMP_IF_END]]
10259 // CHECK6:       omp_if.end:
10260 // CHECK6-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
10261 // CHECK6-NEXT:    ret i32 [[TMP44]]
10262 //
10263 //
10264 // CHECK6-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
10265 // CHECK6-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
10266 // CHECK6-NEXT:  entry:
10267 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
10268 // CHECK6-NEXT:    [[A:%.*]] = alloca i32, align 4
10269 // CHECK6-NEXT:    [[AA:%.*]] = alloca i16, align 2
10270 // CHECK6-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
10271 // CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
10272 // CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
10273 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
10274 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
10275 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
10276 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10277 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
10278 // CHECK6-NEXT:    store i32 0, i32* [[A]], align 4
10279 // CHECK6-NEXT:    store i16 0, i16* [[AA]], align 2
10280 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
10281 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
10282 // CHECK6-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
10283 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
10284 // CHECK6-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
10285 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
10286 // CHECK6-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
10287 // CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
10288 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
10289 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
10290 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
10291 // CHECK6:       omp_if.then:
10292 // CHECK6-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10293 // CHECK6-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
10294 // CHECK6-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
10295 // CHECK6-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10296 // CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
10297 // CHECK6-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
10298 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
10299 // CHECK6-NEXT:    store i8* null, i8** [[TMP9]], align 8
10300 // CHECK6-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
10301 // CHECK6-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
10302 // CHECK6-NEXT:    store i64 [[TMP3]], i64* [[TMP11]], align 8
10303 // CHECK6-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
10304 // CHECK6-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
10305 // CHECK6-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
10306 // CHECK6-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
10307 // CHECK6-NEXT:    store i8* null, i8** [[TMP14]], align 8
10308 // CHECK6-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
10309 // CHECK6-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
10310 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
10311 // CHECK6-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
10312 // CHECK6-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
10313 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
10314 // CHECK6-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
10315 // CHECK6-NEXT:    store i8* null, i8** [[TMP19]], align 8
10316 // CHECK6-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10317 // CHECK6-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10318 // CHECK6-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
10319 // CHECK6-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
10320 // CHECK6-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
10321 // CHECK6-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
10322 // CHECK6:       omp_offload.failed:
10323 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
10324 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT]]
10325 // CHECK6:       omp_offload.cont:
10326 // CHECK6-NEXT:    br label [[OMP_IF_END:%.*]]
10327 // CHECK6:       omp_if.else:
10328 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
10329 // CHECK6-NEXT:    br label [[OMP_IF_END]]
10330 // CHECK6:       omp_if.end:
10331 // CHECK6-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
10332 // CHECK6-NEXT:    ret i32 [[TMP24]]
10333 //
10334 //
10335 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
10336 // CHECK6-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
10337 // CHECK6-NEXT:  entry:
10338 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
10339 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
10340 // CHECK6-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
10341 // CHECK6-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
10342 // CHECK6-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
10343 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10344 // CHECK6-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
10345 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10346 // CHECK6-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
10347 // CHECK6-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
10348 // CHECK6-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
10349 // CHECK6-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
10350 // CHECK6-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
10351 // CHECK6-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
10352 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
10353 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
10354 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
10355 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
10356 // CHECK6-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
10357 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
10358 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
10359 // CHECK6-NEXT:    [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i32*
10360 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[CONV4]], align 4
10361 // CHECK6-NEXT:    [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
10362 // CHECK6-NEXT:    [[TMP6:%.*]] = load i8, i8* [[CONV3]], align 1
10363 // CHECK6-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
10364 // CHECK6-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
10365 // CHECK6-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
10366 // CHECK6-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV5]], align 1
10367 // CHECK6-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
10368 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]], i64 [[TMP7]])
10369 // CHECK6-NEXT:    ret void
10370 //
10371 //
10372 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..11
10373 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
10374 // CHECK6-NEXT:  entry:
10375 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10376 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10377 // CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
10378 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
10379 // CHECK6-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
10380 // CHECK6-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
10381 // CHECK6-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
10382 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10383 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10384 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10385 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10386 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10387 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10388 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10389 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
10390 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10391 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10392 // CHECK6-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
10393 // CHECK6-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
10394 // CHECK6-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
10395 // CHECK6-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
10396 // CHECK6-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
10397 // CHECK6-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
10398 // CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
10399 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
10400 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
10401 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
10402 // CHECK6-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
10403 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
10404 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10405 // CHECK6-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
10406 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10407 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10408 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10409 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
10410 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10411 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10412 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
10413 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10414 // CHECK6:       cond.true:
10415 // CHECK6-NEXT:    br label [[COND_END:%.*]]
10416 // CHECK6:       cond.false:
10417 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10418 // CHECK6-NEXT:    br label [[COND_END]]
10419 // CHECK6:       cond.end:
10420 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
10421 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10422 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10423 // CHECK6-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
10424 // CHECK6-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV3]], align 1
10425 // CHECK6-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1
10426 // CHECK6-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
10427 // CHECK6:       omp_if.then:
10428 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10429 // CHECK6:       omp.inner.for.cond:
10430 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
10431 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !39
10432 // CHECK6-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
10433 // CHECK6-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10434 // CHECK6:       omp.inner.for.body:
10435 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
10436 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
10437 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10438 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !39
10439 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !39
10440 // CHECK6-NEXT:    [[CONV5:%.*]] = sitofp i32 [[TMP13]] to double
10441 // CHECK6-NEXT:    [[ADD6:%.*]] = fadd double [[CONV5]], 1.500000e+00
10442 // CHECK6-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
10443 // CHECK6-NEXT:    store double [[ADD6]], double* [[A]], align 8, !llvm.access.group !39
10444 // CHECK6-NEXT:    [[A7:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
10445 // CHECK6-NEXT:    [[TMP14:%.*]] = load double, double* [[A7]], align 8, !llvm.access.group !39
10446 // CHECK6-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
10447 // CHECK6-NEXT:    store double [[INC]], double* [[A7]], align 8, !llvm.access.group !39
10448 // CHECK6-NEXT:    [[CONV8:%.*]] = fptosi double [[INC]] to i16
10449 // CHECK6-NEXT:    [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]]
10450 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP15]]
10451 // CHECK6-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
10452 // CHECK6-NEXT:    store i16 [[CONV8]], i16* [[ARRAYIDX9]], align 2, !llvm.access.group !39
10453 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10454 // CHECK6:       omp.body.continue:
10455 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10456 // CHECK6:       omp.inner.for.inc:
10457 // CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
10458 // CHECK6-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
10459 // CHECK6-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
10460 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
10461 // CHECK6:       omp.inner.for.end:
10462 // CHECK6-NEXT:    br label [[OMP_IF_END:%.*]]
10463 // CHECK6:       omp_if.else:
10464 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND11:%.*]]
10465 // CHECK6:       omp.inner.for.cond11:
10466 // CHECK6-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10467 // CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10468 // CHECK6-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
10469 // CHECK6-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY13:%.*]], label [[OMP_INNER_FOR_END27:%.*]]
10470 // CHECK6:       omp.inner.for.body13:
10471 // CHECK6-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10472 // CHECK6-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[TMP19]], 1
10473 // CHECK6-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
10474 // CHECK6-NEXT:    store i32 [[ADD15]], i32* [[I]], align 4
10475 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 4
10476 // CHECK6-NEXT:    [[CONV16:%.*]] = sitofp i32 [[TMP20]] to double
10477 // CHECK6-NEXT:    [[ADD17:%.*]] = fadd double [[CONV16]], 1.500000e+00
10478 // CHECK6-NEXT:    [[A18:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
10479 // CHECK6-NEXT:    store double [[ADD17]], double* [[A18]], align 8
10480 // CHECK6-NEXT:    [[A19:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
10481 // CHECK6-NEXT:    [[TMP21:%.*]] = load double, double* [[A19]], align 8
10482 // CHECK6-NEXT:    [[INC20:%.*]] = fadd double [[TMP21]], 1.000000e+00
10483 // CHECK6-NEXT:    store double [[INC20]], double* [[A19]], align 8
10484 // CHECK6-NEXT:    [[CONV21:%.*]] = fptosi double [[INC20]] to i16
10485 // CHECK6-NEXT:    [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]]
10486 // CHECK6-NEXT:    [[ARRAYIDX22:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP22]]
10487 // CHECK6-NEXT:    [[ARRAYIDX23:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX22]], i64 1
10488 // CHECK6-NEXT:    store i16 [[CONV21]], i16* [[ARRAYIDX23]], align 2
10489 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE24:%.*]]
10490 // CHECK6:       omp.body.continue24:
10491 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC25:%.*]]
10492 // CHECK6:       omp.inner.for.inc25:
10493 // CHECK6-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10494 // CHECK6-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP23]], 1
10495 // CHECK6-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4
10496 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND11]], !llvm.loop [[LOOP42:![0-9]+]]
10497 // CHECK6:       omp.inner.for.end27:
10498 // CHECK6-NEXT:    br label [[OMP_IF_END]]
10499 // CHECK6:       omp_if.end:
10500 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10501 // CHECK6:       omp.loop.exit:
10502 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
10503 // CHECK6-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10504 // CHECK6-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
10505 // CHECK6-NEXT:    br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10506 // CHECK6:       .omp.final.then:
10507 // CHECK6-NEXT:    store i32 10, i32* [[I]], align 4
10508 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10509 // CHECK6:       .omp.final.done:
10510 // CHECK6-NEXT:    ret void
10511 //
10512 //
10513 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
10514 // CHECK6-SAME: (i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
10515 // CHECK6-NEXT:  entry:
10516 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
10517 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10518 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
10519 // CHECK6-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
10520 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
10521 // CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
10522 // CHECK6-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
10523 // CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
10524 // CHECK6-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
10525 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
10526 // CHECK6-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10527 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
10528 // CHECK6-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
10529 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
10530 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
10531 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10532 // CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
10533 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
10534 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
10535 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
10536 // CHECK6-NEXT:    [[CONV4:%.*]] = bitcast i64* [[A_CASTED]] to i32*
10537 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[CONV4]], align 4
10538 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
10539 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV1]], align 4
10540 // CHECK6-NEXT:    [[CONV5:%.*]] = bitcast i64* [[N_CASTED]] to i32*
10541 // CHECK6-NEXT:    store i32 [[TMP3]], i32* [[CONV5]], align 4
10542 // CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
10543 // CHECK6-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV2]], align 2
10544 // CHECK6-NEXT:    [[CONV6:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
10545 // CHECK6-NEXT:    store i16 [[TMP5]], i16* [[CONV6]], align 2
10546 // CHECK6-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AA_CASTED]], align 8
10547 // CHECK6-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 1
10548 // CHECK6-NEXT:    [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
10549 // CHECK6-NEXT:    store i8 [[TMP7]], i8* [[CONV7]], align 1
10550 // CHECK6-NEXT:    [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
10551 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]])
10552 // CHECK6-NEXT:    ret void
10553 //
10554 //
10555 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..13
10556 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
10557 // CHECK6-NEXT:  entry:
10558 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10559 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10560 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
10561 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
10562 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
10563 // CHECK6-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
10564 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
10565 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10566 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10567 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10568 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
10569 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
10570 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
10571 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10572 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10573 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10574 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10575 // CHECK6-NEXT:    [[I8:%.*]] = alloca i32, align 4
10576 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10577 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10578 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
10579 // CHECK6-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
10580 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
10581 // CHECK6-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
10582 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
10583 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
10584 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
10585 // CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
10586 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
10587 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
10588 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
10589 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
10590 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
10591 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_4]], align 4
10592 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
10593 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10594 // CHECK6-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
10595 // CHECK6-NEXT:    [[SUB6:%.*]] = sub i32 [[SUB]], 1
10596 // CHECK6-NEXT:    [[ADD:%.*]] = add i32 [[SUB6]], 1
10597 // CHECK6-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
10598 // CHECK6-NEXT:    [[SUB7:%.*]] = sub i32 [[DIV]], 1
10599 // CHECK6-NEXT:    store i32 [[SUB7]], i32* [[DOTCAPTURE_EXPR_5]], align 4
10600 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10601 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
10602 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10603 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
10604 // CHECK6-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
10605 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10606 // CHECK6:       omp.precond.then:
10607 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10608 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
10609 // CHECK6-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
10610 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10611 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10612 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10613 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
10614 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10615 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10616 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
10617 // CHECK6-NEXT:    [[CMP9:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
10618 // CHECK6-NEXT:    br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10619 // CHECK6:       cond.true:
10620 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
10621 // CHECK6-NEXT:    br label [[COND_END:%.*]]
10622 // CHECK6:       cond.false:
10623 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10624 // CHECK6-NEXT:    br label [[COND_END]]
10625 // CHECK6:       cond.end:
10626 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
10627 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10628 // CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10629 // CHECK6-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
10630 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10631 // CHECK6:       omp.inner.for.cond:
10632 // CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
10633 // CHECK6-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !44
10634 // CHECK6-NEXT:    [[ADD10:%.*]] = add i32 [[TMP17]], 1
10635 // CHECK6-NEXT:    [[CMP11:%.*]] = icmp ult i32 [[TMP16]], [[ADD10]]
10636 // CHECK6-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10637 // CHECK6:       omp.inner.for.body:
10638 // CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !44
10639 // CHECK6-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
10640 // CHECK6-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
10641 // CHECK6-NEXT:    [[ADD12:%.*]] = add i32 [[TMP18]], [[MUL]]
10642 // CHECK6-NEXT:    store i32 [[ADD12]], i32* [[I8]], align 4, !llvm.access.group !44
10643 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !44
10644 // CHECK6-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP20]], 1
10645 // CHECK6-NEXT:    store i32 [[ADD13]], i32* [[CONV]], align 4, !llvm.access.group !44
10646 // CHECK6-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV2]], align 2, !llvm.access.group !44
10647 // CHECK6-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP21]] to i32
10648 // CHECK6-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
10649 // CHECK6-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
10650 // CHECK6-NEXT:    store i16 [[CONV16]], i16* [[CONV2]], align 2, !llvm.access.group !44
10651 // CHECK6-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV3]], align 1, !llvm.access.group !44
10652 // CHECK6-NEXT:    [[CONV17:%.*]] = sext i8 [[TMP22]] to i32
10653 // CHECK6-NEXT:    [[ADD18:%.*]] = add nsw i32 [[CONV17]], 1
10654 // CHECK6-NEXT:    [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
10655 // CHECK6-NEXT:    store i8 [[CONV19]], i8* [[CONV3]], align 1, !llvm.access.group !44
10656 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
10657 // CHECK6-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
10658 // CHECK6-NEXT:    [[ADD20:%.*]] = add nsw i32 [[TMP23]], 1
10659 // CHECK6-NEXT:    store i32 [[ADD20]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
10660 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10661 // CHECK6:       omp.body.continue:
10662 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10663 // CHECK6:       omp.inner.for.inc:
10664 // CHECK6-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
10665 // CHECK6-NEXT:    [[ADD21:%.*]] = add i32 [[TMP24]], 1
10666 // CHECK6-NEXT:    store i32 [[ADD21]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
10667 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
10668 // CHECK6:       omp.inner.for.end:
10669 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10670 // CHECK6:       omp.loop.exit:
10671 // CHECK6-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10672 // CHECK6-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
10673 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
10674 // CHECK6-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10675 // CHECK6-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
10676 // CHECK6-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10677 // CHECK6:       .omp.final.then:
10678 // CHECK6-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10679 // CHECK6-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
10680 // CHECK6-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10681 // CHECK6-NEXT:    [[SUB22:%.*]] = sub i32 [[TMP30]], [[TMP31]]
10682 // CHECK6-NEXT:    [[SUB23:%.*]] = sub i32 [[SUB22]], 1
10683 // CHECK6-NEXT:    [[ADD24:%.*]] = add i32 [[SUB23]], 1
10684 // CHECK6-NEXT:    [[DIV25:%.*]] = udiv i32 [[ADD24]], 1
10685 // CHECK6-NEXT:    [[MUL26:%.*]] = mul i32 [[DIV25]], 1
10686 // CHECK6-NEXT:    [[ADD27:%.*]] = add i32 [[TMP29]], [[MUL26]]
10687 // CHECK6-NEXT:    store i32 [[ADD27]], i32* [[I8]], align 4
10688 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10689 // CHECK6:       .omp.final.done:
10690 // CHECK6-NEXT:    br label [[OMP_PRECOND_END]]
10691 // CHECK6:       omp.precond.end:
10692 // CHECK6-NEXT:    ret void
10693 //
10694 //
10695 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
10696 // CHECK6-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
10697 // CHECK6-NEXT:  entry:
10698 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
10699 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
10700 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
10701 // CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
10702 // CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
10703 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
10704 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
10705 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
10706 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
10707 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
10708 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
10709 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
10710 // CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
10711 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
10712 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
10713 // CHECK6-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
10714 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
10715 // CHECK6-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
10716 // CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
10717 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
10718 // CHECK6-NEXT:    ret void
10719 //
10720 //
10721 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..16
10722 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
10723 // CHECK6-NEXT:  entry:
10724 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
10725 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
10726 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
10727 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
10728 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
10729 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10730 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10731 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10732 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10733 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10734 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10735 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
10736 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
10737 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
10738 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
10739 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
10740 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
10741 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
10742 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
10743 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
10744 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10745 // CHECK6-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
10746 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10747 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10748 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
10749 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
10750 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10751 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10752 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
10753 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10754 // CHECK6:       cond.true:
10755 // CHECK6-NEXT:    br label [[COND_END:%.*]]
10756 // CHECK6:       cond.false:
10757 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10758 // CHECK6-NEXT:    br label [[COND_END]]
10759 // CHECK6:       cond.end:
10760 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
10761 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10762 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10763 // CHECK6-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
10764 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10765 // CHECK6:       omp.inner.for.cond:
10766 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
10767 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47
10768 // CHECK6-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
10769 // CHECK6-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10770 // CHECK6:       omp.inner.for.body:
10771 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
10772 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
10773 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10774 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !47
10775 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !47
10776 // CHECK6-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
10777 // CHECK6-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !47
10778 // CHECK6-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !47
10779 // CHECK6-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
10780 // CHECK6-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
10781 // CHECK6-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
10782 // CHECK6-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !47
10783 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
10784 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !47
10785 // CHECK6-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
10786 // CHECK6-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !47
10787 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10788 // CHECK6:       omp.body.continue:
10789 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10790 // CHECK6:       omp.inner.for.inc:
10791 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
10792 // CHECK6-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP12]], 1
10793 // CHECK6-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
10794 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
10795 // CHECK6:       omp.inner.for.end:
10796 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10797 // CHECK6:       omp.loop.exit:
10798 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
10799 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10800 // CHECK6-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
10801 // CHECK6-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10802 // CHECK6:       .omp.final.then:
10803 // CHECK6-NEXT:    store i32 10, i32* [[I]], align 4
10804 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10805 // CHECK6:       .omp.final.done:
10806 // CHECK6-NEXT:    ret void
10807 //
10808 //
10809 // CHECK6-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
10810 // CHECK6-SAME: () #[[ATTR5]] {
10811 // CHECK6-NEXT:  entry:
10812 // CHECK6-NEXT:    call void @__tgt_register_requires(i64 1)
10813 // CHECK6-NEXT:    ret void
10814 //
10815 //
10816 // CHECK7-LABEL: define {{[^@]+}}@_Z3fooi
10817 // CHECK7-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
10818 // CHECK7-NEXT:  entry:
10819 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
10820 // CHECK7-NEXT:    [[A:%.*]] = alloca i32, align 4
10821 // CHECK7-NEXT:    [[AA:%.*]] = alloca i16, align 2
10822 // CHECK7-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
10823 // CHECK7-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
10824 // CHECK7-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
10825 // CHECK7-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
10826 // CHECK7-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
10827 // CHECK7-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
10828 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10829 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
10830 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
10831 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
10832 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__CASTED3:%.*]] = alloca i32, align 4
10833 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
10834 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
10835 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
10836 // CHECK7-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
10837 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
10838 // CHECK7-NEXT:    [[AA_CASTED4:%.*]] = alloca i32, align 4
10839 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [1 x i8*], align 4
10840 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS7:%.*]] = alloca [1 x i8*], align 4
10841 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [1 x i8*], align 4
10842 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10843 // CHECK7-NEXT:    [[A_CASTED9:%.*]] = alloca i32, align 4
10844 // CHECK7-NEXT:    [[AA_CASTED10:%.*]] = alloca i32, align 4
10845 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS12:%.*]] = alloca [2 x i8*], align 4
10846 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS13:%.*]] = alloca [2 x i8*], align 4
10847 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS14:%.*]] = alloca [2 x i8*], align 4
10848 // CHECK7-NEXT:    [[_TMP15:%.*]] = alloca i32, align 4
10849 // CHECK7-NEXT:    [[A_CASTED18:%.*]] = alloca i32, align 4
10850 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [9 x i8*], align 4
10851 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS22:%.*]] = alloca [9 x i8*], align 4
10852 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [9 x i8*], align 4
10853 // CHECK7-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4
10854 // CHECK7-NEXT:    [[_TMP24:%.*]] = alloca i32, align 4
10855 // CHECK7-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
10856 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
10857 // CHECK7-NEXT:    store i32 0, i32* [[A]], align 4
10858 // CHECK7-NEXT:    store i16 0, i16* [[AA]], align 2
10859 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
10860 // CHECK7-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
10861 // CHECK7-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
10862 // CHECK7-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
10863 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
10864 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
10865 // CHECK7-NEXT:    [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
10866 // CHECK7-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
10867 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
10868 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
10869 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
10870 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[A]], align 4
10871 // CHECK7-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_2]], align 4
10872 // CHECK7-NEXT:    [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
10873 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
10874 // CHECK7-NEXT:    store i16 [[TMP7]], i16* [[CONV]], align 2
10875 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[AA_CASTED]], align 4
10876 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10877 // CHECK7-NEXT:    store i32 [[TMP9]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
10878 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
10879 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10880 // CHECK7-NEXT:    store i32 [[TMP11]], i32* [[DOTCAPTURE_EXPR__CASTED3]], align 4
10881 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED3]], align 4
10882 // CHECK7-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10883 // CHECK7-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32*
10884 // CHECK7-NEXT:    store i32 [[TMP8]], i32* [[TMP14]], align 4
10885 // CHECK7-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10886 // CHECK7-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32*
10887 // CHECK7-NEXT:    store i32 [[TMP8]], i32* [[TMP16]], align 4
10888 // CHECK7-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
10889 // CHECK7-NEXT:    store i8* null, i8** [[TMP17]], align 4
10890 // CHECK7-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
10891 // CHECK7-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
10892 // CHECK7-NEXT:    store i32 [[TMP10]], i32* [[TMP19]], align 4
10893 // CHECK7-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
10894 // CHECK7-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32*
10895 // CHECK7-NEXT:    store i32 [[TMP10]], i32* [[TMP21]], align 4
10896 // CHECK7-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
10897 // CHECK7-NEXT:    store i8* null, i8** [[TMP22]], align 4
10898 // CHECK7-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
10899 // CHECK7-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32*
10900 // CHECK7-NEXT:    store i32 [[TMP12]], i32* [[TMP24]], align 4
10901 // CHECK7-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
10902 // CHECK7-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32*
10903 // CHECK7-NEXT:    store i32 [[TMP12]], i32* [[TMP26]], align 4
10904 // CHECK7-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
10905 // CHECK7-NEXT:    store i8* null, i8** [[TMP27]], align 4
10906 // CHECK7-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10907 // CHECK7-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10908 // CHECK7-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
10909 // CHECK7-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2
10910 // CHECK7-NEXT:    store i16 [[TMP31]], i16* [[TMP30]], align 4
10911 // CHECK7-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
10912 // CHECK7-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10913 // CHECK7-NEXT:    store i32 [[TMP33]], i32* [[TMP32]], align 4
10914 // CHECK7-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
10915 // CHECK7-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
10916 // CHECK7-NEXT:    store i32 [[TMP35]], i32* [[TMP34]], align 4
10917 // CHECK7-NEXT:    [[TMP36:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 72, i32 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
10918 // CHECK7-NEXT:    [[TMP37:%.*]] = bitcast i8* [[TMP36]] to %struct.kmp_task_t_with_privates*
10919 // CHECK7-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP37]], i32 0, i32 0
10920 // CHECK7-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP38]], i32 0, i32 0
10921 // CHECK7-NEXT:    [[TMP40:%.*]] = load i8*, i8** [[TMP39]], align 4
10922 // CHECK7-NEXT:    [[TMP41:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
10923 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP40]], i8* align 4 [[TMP41]], i32 12, i1 false)
10924 // CHECK7-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP37]], i32 0, i32 1
10925 // CHECK7-NEXT:    [[TMP43:%.*]] = bitcast i8* [[TMP40]] to %struct.anon*
10926 // CHECK7-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 0
10927 // CHECK7-NEXT:    [[TMP45:%.*]] = bitcast [3 x i64]* [[TMP44]] to i8*
10928 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP45]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes to i8*), i32 24, i1 false)
10929 // CHECK7-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 1
10930 // CHECK7-NEXT:    [[TMP47:%.*]] = bitcast [3 x i8*]* [[TMP46]] to i8*
10931 // CHECK7-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP28]] to i8*
10932 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP47]], i8* align 4 [[TMP48]], i32 12, i1 false)
10933 // CHECK7-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 2
10934 // CHECK7-NEXT:    [[TMP50:%.*]] = bitcast [3 x i8*]* [[TMP49]] to i8*
10935 // CHECK7-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP29]] to i8*
10936 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP50]], i8* align 4 [[TMP51]], i32 12, i1 false)
10937 // CHECK7-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 3
10938 // CHECK7-NEXT:    [[TMP53:%.*]] = load i16, i16* [[AA]], align 2
10939 // CHECK7-NEXT:    store i16 [[TMP53]], i16* [[TMP52]], align 4
10940 // CHECK7-NEXT:    [[TMP54:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP36]])
10941 // CHECK7-NEXT:    [[TMP55:%.*]] = load i32, i32* [[A]], align 4
10942 // CHECK7-NEXT:    store i32 [[TMP55]], i32* [[A_CASTED]], align 4
10943 // CHECK7-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A_CASTED]], align 4
10944 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l102(i32 [[TMP56]]) #[[ATTR4:[0-9]+]]
10945 // CHECK7-NEXT:    [[TMP57:%.*]] = load i16, i16* [[AA]], align 2
10946 // CHECK7-NEXT:    [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
10947 // CHECK7-NEXT:    store i16 [[TMP57]], i16* [[CONV5]], align 2
10948 // CHECK7-NEXT:    [[TMP58:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
10949 // CHECK7-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
10950 // CHECK7-NEXT:    [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i32*
10951 // CHECK7-NEXT:    store i32 [[TMP58]], i32* [[TMP60]], align 4
10952 // CHECK7-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
10953 // CHECK7-NEXT:    [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32*
10954 // CHECK7-NEXT:    store i32 [[TMP58]], i32* [[TMP62]], align 4
10955 // CHECK7-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
10956 // CHECK7-NEXT:    store i8* null, i8** [[TMP63]], align 4
10957 // CHECK7-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
10958 // CHECK7-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
10959 // CHECK7-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
10960 // CHECK7-NEXT:    [[TMP66:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111.region_id, i32 1, i8** [[TMP64]], i8** [[TMP65]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
10961 // CHECK7-NEXT:    [[TMP67:%.*]] = icmp ne i32 [[TMP66]], 0
10962 // CHECK7-NEXT:    br i1 [[TMP67]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
10963 // CHECK7:       omp_offload.failed:
10964 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111(i32 [[TMP58]]) #[[ATTR4]]
10965 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT]]
10966 // CHECK7:       omp_offload.cont:
10967 // CHECK7-NEXT:    [[TMP68:%.*]] = load i32, i32* [[A]], align 4
10968 // CHECK7-NEXT:    store i32 [[TMP68]], i32* [[A_CASTED9]], align 4
10969 // CHECK7-NEXT:    [[TMP69:%.*]] = load i32, i32* [[A_CASTED9]], align 4
10970 // CHECK7-NEXT:    [[TMP70:%.*]] = load i16, i16* [[AA]], align 2
10971 // CHECK7-NEXT:    [[CONV11:%.*]] = bitcast i32* [[AA_CASTED10]] to i16*
10972 // CHECK7-NEXT:    store i16 [[TMP70]], i16* [[CONV11]], align 2
10973 // CHECK7-NEXT:    [[TMP71:%.*]] = load i32, i32* [[AA_CASTED10]], align 4
10974 // CHECK7-NEXT:    [[TMP72:%.*]] = load i32, i32* [[N_ADDR]], align 4
10975 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP72]], 10
10976 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
10977 // CHECK7:       omp_if.then:
10978 // CHECK7-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 0
10979 // CHECK7-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
10980 // CHECK7-NEXT:    store i32 [[TMP69]], i32* [[TMP74]], align 4
10981 // CHECK7-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 0
10982 // CHECK7-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32*
10983 // CHECK7-NEXT:    store i32 [[TMP69]], i32* [[TMP76]], align 4
10984 // CHECK7-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS14]], i32 0, i32 0
10985 // CHECK7-NEXT:    store i8* null, i8** [[TMP77]], align 4
10986 // CHECK7-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 1
10987 // CHECK7-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
10988 // CHECK7-NEXT:    store i32 [[TMP71]], i32* [[TMP79]], align 4
10989 // CHECK7-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 1
10990 // CHECK7-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
10991 // CHECK7-NEXT:    store i32 [[TMP71]], i32* [[TMP81]], align 4
10992 // CHECK7-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS14]], i32 0, i32 1
10993 // CHECK7-NEXT:    store i8* null, i8** [[TMP82]], align 4
10994 // CHECK7-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 0
10995 // CHECK7-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 0
10996 // CHECK7-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
10997 // CHECK7-NEXT:    [[TMP85:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118.region_id, i32 2, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.7, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
10998 // CHECK7-NEXT:    [[TMP86:%.*]] = icmp ne i32 [[TMP85]], 0
10999 // CHECK7-NEXT:    br i1 [[TMP86]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
11000 // CHECK7:       omp_offload.failed16:
11001 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i32 [[TMP69]], i32 [[TMP71]]) #[[ATTR4]]
11002 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT17]]
11003 // CHECK7:       omp_offload.cont17:
11004 // CHECK7-NEXT:    br label [[OMP_IF_END:%.*]]
11005 // CHECK7:       omp_if.else:
11006 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i32 [[TMP69]], i32 [[TMP71]]) #[[ATTR4]]
11007 // CHECK7-NEXT:    br label [[OMP_IF_END]]
11008 // CHECK7:       omp_if.end:
11009 // CHECK7-NEXT:    [[TMP87:%.*]] = load i32, i32* [[A]], align 4
11010 // CHECK7-NEXT:    store i32 [[TMP87]], i32* [[A_CASTED18]], align 4
11011 // CHECK7-NEXT:    [[TMP88:%.*]] = load i32, i32* [[A_CASTED18]], align 4
11012 // CHECK7-NEXT:    [[TMP89:%.*]] = load i32, i32* [[N_ADDR]], align 4
11013 // CHECK7-NEXT:    [[CMP19:%.*]] = icmp sgt i32 [[TMP89]], 20
11014 // CHECK7-NEXT:    br i1 [[CMP19]], label [[OMP_IF_THEN20:%.*]], label [[OMP_IF_ELSE27:%.*]]
11015 // CHECK7:       omp_if.then20:
11016 // CHECK7-NEXT:    [[TMP90:%.*]] = mul nuw i32 [[TMP1]], 4
11017 // CHECK7-NEXT:    [[TMP91:%.*]] = sext i32 [[TMP90]] to i64
11018 // CHECK7-NEXT:    [[TMP92:%.*]] = mul nuw i32 5, [[TMP3]]
11019 // CHECK7-NEXT:    [[TMP93:%.*]] = mul nuw i32 [[TMP92]], 8
11020 // CHECK7-NEXT:    [[TMP94:%.*]] = sext i32 [[TMP93]] to i64
11021 // CHECK7-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
11022 // CHECK7-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32*
11023 // CHECK7-NEXT:    store i32 [[TMP88]], i32* [[TMP96]], align 4
11024 // CHECK7-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
11025 // CHECK7-NEXT:    [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32*
11026 // CHECK7-NEXT:    store i32 [[TMP88]], i32* [[TMP98]], align 4
11027 // CHECK7-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
11028 // CHECK7-NEXT:    store i64 4, i64* [[TMP99]], align 4
11029 // CHECK7-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0
11030 // CHECK7-NEXT:    store i8* null, i8** [[TMP100]], align 4
11031 // CHECK7-NEXT:    [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
11032 // CHECK7-NEXT:    [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]**
11033 // CHECK7-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 4
11034 // CHECK7-NEXT:    [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
11035 // CHECK7-NEXT:    [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]**
11036 // CHECK7-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4
11037 // CHECK7-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
11038 // CHECK7-NEXT:    store i64 40, i64* [[TMP105]], align 4
11039 // CHECK7-NEXT:    [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1
11040 // CHECK7-NEXT:    store i8* null, i8** [[TMP106]], align 4
11041 // CHECK7-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2
11042 // CHECK7-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32*
11043 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP108]], align 4
11044 // CHECK7-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2
11045 // CHECK7-NEXT:    [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32*
11046 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP110]], align 4
11047 // CHECK7-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
11048 // CHECK7-NEXT:    store i64 4, i64* [[TMP111]], align 4
11049 // CHECK7-NEXT:    [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 2
11050 // CHECK7-NEXT:    store i8* null, i8** [[TMP112]], align 4
11051 // CHECK7-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3
11052 // CHECK7-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float**
11053 // CHECK7-NEXT:    store float* [[VLA]], float** [[TMP114]], align 4
11054 // CHECK7-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3
11055 // CHECK7-NEXT:    [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float**
11056 // CHECK7-NEXT:    store float* [[VLA]], float** [[TMP116]], align 4
11057 // CHECK7-NEXT:    [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
11058 // CHECK7-NEXT:    store i64 [[TMP91]], i64* [[TMP117]], align 4
11059 // CHECK7-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 3
11060 // CHECK7-NEXT:    store i8* null, i8** [[TMP118]], align 4
11061 // CHECK7-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4
11062 // CHECK7-NEXT:    [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]**
11063 // CHECK7-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 4
11064 // CHECK7-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4
11065 // CHECK7-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]**
11066 // CHECK7-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 4
11067 // CHECK7-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
11068 // CHECK7-NEXT:    store i64 400, i64* [[TMP123]], align 4
11069 // CHECK7-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 4
11070 // CHECK7-NEXT:    store i8* null, i8** [[TMP124]], align 4
11071 // CHECK7-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 5
11072 // CHECK7-NEXT:    [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32*
11073 // CHECK7-NEXT:    store i32 5, i32* [[TMP126]], align 4
11074 // CHECK7-NEXT:    [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 5
11075 // CHECK7-NEXT:    [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32*
11076 // CHECK7-NEXT:    store i32 5, i32* [[TMP128]], align 4
11077 // CHECK7-NEXT:    [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
11078 // CHECK7-NEXT:    store i64 4, i64* [[TMP129]], align 4
11079 // CHECK7-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 5
11080 // CHECK7-NEXT:    store i8* null, i8** [[TMP130]], align 4
11081 // CHECK7-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 6
11082 // CHECK7-NEXT:    [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32*
11083 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP132]], align 4
11084 // CHECK7-NEXT:    [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 6
11085 // CHECK7-NEXT:    [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32*
11086 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP134]], align 4
11087 // CHECK7-NEXT:    [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
11088 // CHECK7-NEXT:    store i64 4, i64* [[TMP135]], align 4
11089 // CHECK7-NEXT:    [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 6
11090 // CHECK7-NEXT:    store i8* null, i8** [[TMP136]], align 4
11091 // CHECK7-NEXT:    [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 7
11092 // CHECK7-NEXT:    [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double**
11093 // CHECK7-NEXT:    store double* [[VLA1]], double** [[TMP138]], align 4
11094 // CHECK7-NEXT:    [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 7
11095 // CHECK7-NEXT:    [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double**
11096 // CHECK7-NEXT:    store double* [[VLA1]], double** [[TMP140]], align 4
11097 // CHECK7-NEXT:    [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
11098 // CHECK7-NEXT:    store i64 [[TMP94]], i64* [[TMP141]], align 4
11099 // CHECK7-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 7
11100 // CHECK7-NEXT:    store i8* null, i8** [[TMP142]], align 4
11101 // CHECK7-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 8
11102 // CHECK7-NEXT:    [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT**
11103 // CHECK7-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 4
11104 // CHECK7-NEXT:    [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 8
11105 // CHECK7-NEXT:    [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT**
11106 // CHECK7-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 4
11107 // CHECK7-NEXT:    [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
11108 // CHECK7-NEXT:    store i64 12, i64* [[TMP147]], align 4
11109 // CHECK7-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 8
11110 // CHECK7-NEXT:    store i8* null, i8** [[TMP148]], align 4
11111 // CHECK7-NEXT:    [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
11112 // CHECK7-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
11113 // CHECK7-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
11114 // CHECK7-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
11115 // CHECK7-NEXT:    [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11116 // CHECK7-NEXT:    [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0
11117 // CHECK7-NEXT:    br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]]
11118 // CHECK7:       omp_offload.failed25:
11119 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
11120 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT26]]
11121 // CHECK7:       omp_offload.cont26:
11122 // CHECK7-NEXT:    br label [[OMP_IF_END28:%.*]]
11123 // CHECK7:       omp_if.else27:
11124 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
11125 // CHECK7-NEXT:    br label [[OMP_IF_END28]]
11126 // CHECK7:       omp_if.end28:
11127 // CHECK7-NEXT:    [[TMP154:%.*]] = load i32, i32* [[A]], align 4
11128 // CHECK7-NEXT:    [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
11129 // CHECK7-NEXT:    call void @llvm.stackrestore(i8* [[TMP155]])
11130 // CHECK7-NEXT:    ret i32 [[TMP154]]
11131 //
11132 //
11133 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
11134 // CHECK7-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], i32 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2:[0-9]+]] {
11135 // CHECK7-NEXT:  entry:
11136 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
11137 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
11138 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4
11139 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
11140 // CHECK7-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
11141 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
11142 // CHECK7-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
11143 // CHECK7-NEXT:    store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
11144 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
11145 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
11146 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
11147 // CHECK7-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
11148 // CHECK7-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
11149 // CHECK7-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
11150 // CHECK7-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
11151 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
11152 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP4]])
11153 // CHECK7-NEXT:    ret void
11154 //
11155 //
11156 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined.
11157 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3:[0-9]+]] {
11158 // CHECK7-NEXT:  entry:
11159 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11160 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11161 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
11162 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11163 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11164 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11165 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11166 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11167 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11168 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
11169 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11170 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11171 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
11172 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
11173 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11174 // CHECK7-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
11175 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11176 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11177 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11178 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
11179 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11180 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11181 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
11182 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11183 // CHECK7:       cond.true:
11184 // CHECK7-NEXT:    br label [[COND_END:%.*]]
11185 // CHECK7:       cond.false:
11186 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11187 // CHECK7-NEXT:    br label [[COND_END]]
11188 // CHECK7:       cond.end:
11189 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
11190 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11191 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11192 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
11193 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11194 // CHECK7:       omp.inner.for.cond:
11195 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
11196 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
11197 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
11198 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11199 // CHECK7:       omp.inner.for.body:
11200 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
11201 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
11202 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11203 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
11204 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11205 // CHECK7:       omp.body.continue:
11206 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11207 // CHECK7:       omp.inner.for.inc:
11208 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
11209 // CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
11210 // CHECK7-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
11211 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
11212 // CHECK7:       omp.inner.for.end:
11213 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11214 // CHECK7:       omp.loop.exit:
11215 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
11216 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11217 // CHECK7-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
11218 // CHECK7-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11219 // CHECK7:       .omp.final.then:
11220 // CHECK7-NEXT:    store i32 10, i32* [[I]], align 4
11221 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11222 // CHECK7:       .omp.final.done:
11223 // CHECK7-NEXT:    ret void
11224 //
11225 //
11226 // CHECK7-LABEL: define {{[^@]+}}@.omp_task_privates_map.
11227 // CHECK7-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], i16** noalias noundef [[TMP1:%.*]], [3 x i8*]** noalias noundef [[TMP2:%.*]], [3 x i8*]** noalias noundef [[TMP3:%.*]], [3 x i64]** noalias noundef [[TMP4:%.*]]) #[[ATTR5:[0-9]+]] {
11228 // CHECK7-NEXT:  entry:
11229 // CHECK7-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 4
11230 // CHECK7-NEXT:    [[DOTADDR1:%.*]] = alloca i16**, align 4
11231 // CHECK7-NEXT:    [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 4
11232 // CHECK7-NEXT:    [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 4
11233 // CHECK7-NEXT:    [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 4
11234 // CHECK7-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 4
11235 // CHECK7-NEXT:    store i16** [[TMP1]], i16*** [[DOTADDR1]], align 4
11236 // CHECK7-NEXT:    store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 4
11237 // CHECK7-NEXT:    store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 4
11238 // CHECK7-NEXT:    store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 4
11239 // CHECK7-NEXT:    [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 4
11240 // CHECK7-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0
11241 // CHECK7-NEXT:    [[TMP7:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 4
11242 // CHECK7-NEXT:    store [3 x i64]* [[TMP6]], [3 x i64]** [[TMP7]], align 4
11243 // CHECK7-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1
11244 // CHECK7-NEXT:    [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 4
11245 // CHECK7-NEXT:    store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 4
11246 // CHECK7-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2
11247 // CHECK7-NEXT:    [[TMP11:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 4
11248 // CHECK7-NEXT:    store [3 x i8*]* [[TMP10]], [3 x i8*]** [[TMP11]], align 4
11249 // CHECK7-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3
11250 // CHECK7-NEXT:    [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 4
11251 // CHECK7-NEXT:    store i16* [[TMP12]], i16** [[TMP13]], align 4
11252 // CHECK7-NEXT:    ret void
11253 //
11254 //
11255 // CHECK7-LABEL: define {{[^@]+}}@.omp_task_entry.
11256 // CHECK7-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR6:[0-9]+]] {
11257 // CHECK7-NEXT:  entry:
11258 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
11259 // CHECK7-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
11260 // CHECK7-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
11261 // CHECK7-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
11262 // CHECK7-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
11263 // CHECK7-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
11264 // CHECK7-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 4
11265 // CHECK7-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 4
11266 // CHECK7-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 4
11267 // CHECK7-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 4
11268 // CHECK7-NEXT:    [[AA_CASTED_I:%.*]] = alloca i32, align 4
11269 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__CASTED_I:%.*]] = alloca i32, align 4
11270 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__CASTED4_I:%.*]] = alloca i32, align 4
11271 // CHECK7-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
11272 // CHECK7-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
11273 // CHECK7-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
11274 // CHECK7-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
11275 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
11276 // CHECK7-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
11277 // CHECK7-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
11278 // CHECK7-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
11279 // CHECK7-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
11280 // CHECK7-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
11281 // CHECK7-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
11282 // CHECK7-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
11283 // CHECK7-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
11284 // CHECK7-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
11285 // CHECK7-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]])
11286 // CHECK7-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
11287 // CHECK7-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
11288 // CHECK7-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]])
11289 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !27
11290 // CHECK7-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !27
11291 // CHECK7-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27
11292 // CHECK7-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27
11293 // CHECK7-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !27
11294 // CHECK7-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !27
11295 // CHECK7-NEXT:    [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !27
11296 // CHECK7-NEXT:    [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27
11297 // CHECK7-NEXT:    [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27
11298 // CHECK7-NEXT:    [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)*
11299 // CHECK7-NEXT:    call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
11300 // CHECK7-NEXT:    [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !27
11301 // CHECK7-NEXT:    [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !27
11302 // CHECK7-NEXT:    [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !27
11303 // CHECK7-NEXT:    [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !27
11304 // CHECK7-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i32 0, i32 0
11305 // CHECK7-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i32 0, i32 0
11306 // CHECK7-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i32 0, i32 0
11307 // CHECK7-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
11308 // CHECK7-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
11309 // CHECK7-NEXT:    [[TMP25:%.*]] = load i32, i32* [[TMP23]], align 4
11310 // CHECK7-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) #[[ATTR4]]
11311 // CHECK7-NEXT:    [[TMP26:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 [[TMP25]], i32 1, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
11312 // CHECK7-NEXT:    [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
11313 // CHECK7-NEXT:    br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
11314 // CHECK7:       omp_offload.failed.i:
11315 // CHECK7-NEXT:    [[TMP28:%.*]] = load i16, i16* [[TMP16]], align 2
11316 // CHECK7-NEXT:    [[CONV_I:%.*]] = bitcast i32* [[AA_CASTED_I]] to i16*
11317 // CHECK7-NEXT:    store i16 [[TMP28]], i16* [[CONV_I]], align 2, !noalias !27
11318 // CHECK7-NEXT:    [[TMP29:%.*]] = load i32, i32* [[AA_CASTED_I]], align 4, !noalias !27
11319 // CHECK7-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP23]], align 4
11320 // CHECK7-NEXT:    store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !27
11321 // CHECK7-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !27
11322 // CHECK7-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP24]], align 4
11323 // CHECK7-NEXT:    store i32 [[TMP32]], i32* [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !27
11324 // CHECK7-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !27
11325 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97(i32 [[TMP29]], i32 [[TMP31]], i32 [[TMP33]]) #[[ATTR4]]
11326 // CHECK7-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
11327 // CHECK7:       .omp_outlined..1.exit:
11328 // CHECK7-NEXT:    ret i32 0
11329 //
11330 //
11331 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l102
11332 // CHECK7-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
11333 // CHECK7-NEXT:  entry:
11334 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
11335 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
11336 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
11337 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
11338 // CHECK7-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
11339 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
11340 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]])
11341 // CHECK7-NEXT:    ret void
11342 //
11343 //
11344 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..2
11345 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
11346 // CHECK7-NEXT:  entry:
11347 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11348 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11349 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
11350 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11351 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11352 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11353 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11354 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11355 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11356 // CHECK7-NEXT:    [[A1:%.*]] = alloca i32, align 4
11357 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11358 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11359 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
11360 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11361 // CHECK7-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
11362 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11363 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11364 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11365 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
11366 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11367 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11368 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
11369 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11370 // CHECK7:       cond.true:
11371 // CHECK7-NEXT:    br label [[COND_END:%.*]]
11372 // CHECK7:       cond.false:
11373 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11374 // CHECK7-NEXT:    br label [[COND_END]]
11375 // CHECK7:       cond.end:
11376 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
11377 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11378 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11379 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
11380 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11381 // CHECK7:       omp.inner.for.cond:
11382 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11383 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11384 // CHECK7-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
11385 // CHECK7-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11386 // CHECK7:       omp.inner.for.body:
11387 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11388 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
11389 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11390 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[A1]], align 4, !nontemporal !28
11391 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A1]], align 4, !nontemporal !28
11392 // CHECK7-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
11393 // CHECK7-NEXT:    store i32 [[ADD3]], i32* [[A1]], align 4, !nontemporal !28
11394 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11395 // CHECK7:       omp.body.continue:
11396 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11397 // CHECK7:       omp.inner.for.inc:
11398 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11399 // CHECK7-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1
11400 // CHECK7-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
11401 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
11402 // CHECK7:       omp.inner.for.end:
11403 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11404 // CHECK7:       omp.loop.exit:
11405 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
11406 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11407 // CHECK7-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
11408 // CHECK7-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11409 // CHECK7:       .omp.final.then:
11410 // CHECK7-NEXT:    store i32 10, i32* [[A_ADDR]], align 4
11411 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11412 // CHECK7:       .omp.final.done:
11413 // CHECK7-NEXT:    ret void
11414 //
11415 //
11416 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
11417 // CHECK7-SAME: (i32 noundef [[AA:%.*]]) #[[ATTR2]] {
11418 // CHECK7-NEXT:  entry:
11419 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
11420 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
11421 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
11422 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
11423 // CHECK7-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
11424 // CHECK7-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
11425 // CHECK7-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
11426 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
11427 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]])
11428 // CHECK7-NEXT:    ret void
11429 //
11430 //
11431 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..3
11432 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3]] {
11433 // CHECK7-NEXT:  entry:
11434 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11435 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11436 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
11437 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11438 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11439 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11440 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11441 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11442 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11443 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
11444 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11445 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11446 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
11447 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
11448 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11449 // CHECK7-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
11450 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11451 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11452 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11453 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
11454 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11455 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11456 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
11457 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11458 // CHECK7:       cond.true:
11459 // CHECK7-NEXT:    br label [[COND_END:%.*]]
11460 // CHECK7:       cond.false:
11461 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11462 // CHECK7-NEXT:    br label [[COND_END]]
11463 // CHECK7:       cond.end:
11464 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
11465 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11466 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11467 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
11468 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11469 // CHECK7:       omp.inner.for.cond:
11470 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
11471 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !31
11472 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
11473 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11474 // CHECK7:       omp.inner.for.body:
11475 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
11476 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
11477 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11478 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !31
11479 // CHECK7-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !31
11480 // CHECK7-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
11481 // CHECK7-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
11482 // CHECK7-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
11483 // CHECK7-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !31
11484 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11485 // CHECK7:       omp.body.continue:
11486 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11487 // CHECK7:       omp.inner.for.inc:
11488 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
11489 // CHECK7-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
11490 // CHECK7-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
11491 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
11492 // CHECK7:       omp.inner.for.end:
11493 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11494 // CHECK7:       omp.loop.exit:
11495 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
11496 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11497 // CHECK7-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
11498 // CHECK7-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11499 // CHECK7:       .omp.final.then:
11500 // CHECK7-NEXT:    store i32 10, i32* [[I]], align 4
11501 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11502 // CHECK7:       .omp.final.done:
11503 // CHECK7-NEXT:    ret void
11504 //
11505 //
11506 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
11507 // CHECK7-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR2]] {
11508 // CHECK7-NEXT:  entry:
11509 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
11510 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
11511 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
11512 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
11513 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
11514 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
11515 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
11516 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
11517 // CHECK7-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
11518 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
11519 // CHECK7-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
11520 // CHECK7-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
11521 // CHECK7-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
11522 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
11523 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
11524 // CHECK7-NEXT:    ret void
11525 //
11526 //
11527 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..6
11528 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3]] {
11529 // CHECK7-NEXT:  entry:
11530 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11531 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11532 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
11533 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
11534 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11535 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11536 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11537 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11538 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11539 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11540 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
11541 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11542 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11543 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
11544 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
11545 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
11546 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11547 // CHECK7-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
11548 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11549 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11550 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11551 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
11552 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11553 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11554 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
11555 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11556 // CHECK7:       cond.true:
11557 // CHECK7-NEXT:    br label [[COND_END:%.*]]
11558 // CHECK7:       cond.false:
11559 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11560 // CHECK7-NEXT:    br label [[COND_END]]
11561 // CHECK7:       cond.end:
11562 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
11563 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11564 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11565 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
11566 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11567 // CHECK7:       omp.inner.for.cond:
11568 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
11569 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !34
11570 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
11571 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11572 // CHECK7:       omp.inner.for.body:
11573 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
11574 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
11575 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11576 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !34
11577 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !34
11578 // CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
11579 // CHECK7-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !34
11580 // CHECK7-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !34
11581 // CHECK7-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP9]] to i32
11582 // CHECK7-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
11583 // CHECK7-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
11584 // CHECK7-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !34
11585 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11586 // CHECK7:       omp.body.continue:
11587 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11588 // CHECK7:       omp.inner.for.inc:
11589 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
11590 // CHECK7-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1
11591 // CHECK7-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
11592 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
11593 // CHECK7:       omp.inner.for.end:
11594 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11595 // CHECK7:       omp.loop.exit:
11596 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
11597 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11598 // CHECK7-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
11599 // CHECK7-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11600 // CHECK7:       .omp.final.then:
11601 // CHECK7-NEXT:    store i32 10, i32* [[I]], align 4
11602 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11603 // CHECK7:       .omp.final.done:
11604 // CHECK7-NEXT:    ret void
11605 //
11606 //
11607 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
11608 // CHECK7-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR2]] {
11609 // CHECK7-NEXT:  entry:
11610 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
11611 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
11612 // CHECK7-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
11613 // CHECK7-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
11614 // CHECK7-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
11615 // CHECK7-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
11616 // CHECK7-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
11617 // CHECK7-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
11618 // CHECK7-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
11619 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
11620 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
11621 // CHECK7-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
11622 // CHECK7-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
11623 // CHECK7-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
11624 // CHECK7-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
11625 // CHECK7-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
11626 // CHECK7-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
11627 // CHECK7-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
11628 // CHECK7-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
11629 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
11630 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
11631 // CHECK7-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
11632 // CHECK7-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
11633 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
11634 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
11635 // CHECK7-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
11636 // CHECK7-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
11637 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
11638 // CHECK7-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
11639 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
11640 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
11641 // CHECK7-NEXT:    ret void
11642 //
11643 //
11644 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..9
11645 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR3]] {
11646 // CHECK7-NEXT:  entry:
11647 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11648 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11649 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
11650 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
11651 // CHECK7-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
11652 // CHECK7-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
11653 // CHECK7-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
11654 // CHECK7-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
11655 // CHECK7-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
11656 // CHECK7-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
11657 // CHECK7-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
11658 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11659 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11660 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11661 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11662 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11663 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11664 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
11665 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11666 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11667 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
11668 // CHECK7-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
11669 // CHECK7-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
11670 // CHECK7-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
11671 // CHECK7-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
11672 // CHECK7-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
11673 // CHECK7-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
11674 // CHECK7-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
11675 // CHECK7-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
11676 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
11677 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
11678 // CHECK7-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
11679 // CHECK7-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
11680 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
11681 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
11682 // CHECK7-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
11683 // CHECK7-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
11684 // CHECK7-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 0
11685 // CHECK7-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
11686 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11687 // CHECK7-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
11688 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11689 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11690 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11691 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
11692 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11693 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11694 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
11695 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11696 // CHECK7:       cond.true:
11697 // CHECK7-NEXT:    br label [[COND_END:%.*]]
11698 // CHECK7:       cond.false:
11699 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11700 // CHECK7-NEXT:    br label [[COND_END]]
11701 // CHECK7:       cond.end:
11702 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
11703 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11704 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11705 // CHECK7-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
11706 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11707 // CHECK7:       omp.inner.for.cond:
11708 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
11709 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !37
11710 // CHECK7-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
11711 // CHECK7-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11712 // CHECK7:       omp.inner.for.body:
11713 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
11714 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
11715 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11716 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !37
11717 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !37
11718 // CHECK7-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
11719 // CHECK7-NEXT:    store i32 [[ADD6]], i32* [[A_ADDR]], align 4, !llvm.access.group !37
11720 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
11721 // CHECK7-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !37
11722 // CHECK7-NEXT:    [[CONV:%.*]] = fpext float [[TMP17]] to double
11723 // CHECK7-NEXT:    [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00
11724 // CHECK7-NEXT:    [[CONV8:%.*]] = fptrunc double [[ADD7]] to float
11725 // CHECK7-NEXT:    store float [[CONV8]], float* [[ARRAYIDX]], align 4, !llvm.access.group !37
11726 // CHECK7-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
11727 // CHECK7-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX9]], align 4, !llvm.access.group !37
11728 // CHECK7-NEXT:    [[CONV10:%.*]] = fpext float [[TMP18]] to double
11729 // CHECK7-NEXT:    [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00
11730 // CHECK7-NEXT:    [[CONV12:%.*]] = fptrunc double [[ADD11]] to float
11731 // CHECK7-NEXT:    store float [[CONV12]], float* [[ARRAYIDX9]], align 4, !llvm.access.group !37
11732 // CHECK7-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
11733 // CHECK7-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX13]], i32 0, i32 2
11734 // CHECK7-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX14]], align 8, !llvm.access.group !37
11735 // CHECK7-NEXT:    [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00
11736 // CHECK7-NEXT:    store double [[ADD15]], double* [[ARRAYIDX14]], align 8, !llvm.access.group !37
11737 // CHECK7-NEXT:    [[TMP20:%.*]] = mul nsw i32 1, [[TMP5]]
11738 // CHECK7-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP20]]
11739 // CHECK7-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX16]], i32 3
11740 // CHECK7-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !37
11741 // CHECK7-NEXT:    [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00
11742 // CHECK7-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !37
11743 // CHECK7-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
11744 // CHECK7-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !37
11745 // CHECK7-NEXT:    [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1
11746 // CHECK7-NEXT:    store i64 [[ADD19]], i64* [[X]], align 4, !llvm.access.group !37
11747 // CHECK7-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
11748 // CHECK7-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !37
11749 // CHECK7-NEXT:    [[CONV20:%.*]] = sext i8 [[TMP23]] to i32
11750 // CHECK7-NEXT:    [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1
11751 // CHECK7-NEXT:    [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8
11752 // CHECK7-NEXT:    store i8 [[CONV22]], i8* [[Y]], align 4, !llvm.access.group !37
11753 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11754 // CHECK7:       omp.body.continue:
11755 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11756 // CHECK7:       omp.inner.for.inc:
11757 // CHECK7-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
11758 // CHECK7-NEXT:    [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1
11759 // CHECK7-NEXT:    store i32 [[ADD23]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
11760 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
11761 // CHECK7:       omp.inner.for.end:
11762 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11763 // CHECK7:       omp.loop.exit:
11764 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
11765 // CHECK7-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11766 // CHECK7-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
11767 // CHECK7-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11768 // CHECK7:       .omp.final.then:
11769 // CHECK7-NEXT:    store i32 10, i32* [[I]], align 4
11770 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11771 // CHECK7:       .omp.final.done:
11772 // CHECK7-NEXT:    ret void
11773 //
11774 //
11775 // CHECK7-LABEL: define {{[^@]+}}@_Z3bari
11776 // CHECK7-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
11777 // CHECK7-NEXT:  entry:
11778 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
11779 // CHECK7-NEXT:    [[A:%.*]] = alloca i32, align 4
11780 // CHECK7-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
11781 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
11782 // CHECK7-NEXT:    store i32 0, i32* [[A]], align 4
11783 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
11784 // CHECK7-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
11785 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
11786 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
11787 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
11788 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
11789 // CHECK7-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef [[TMP2]])
11790 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
11791 // CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
11792 // CHECK7-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
11793 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
11794 // CHECK7-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
11795 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
11796 // CHECK7-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
11797 // CHECK7-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
11798 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
11799 // CHECK7-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
11800 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
11801 // CHECK7-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
11802 // CHECK7-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
11803 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
11804 // CHECK7-NEXT:    ret i32 [[TMP8]]
11805 //
11806 //
11807 // CHECK7-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
11808 // CHECK7-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
11809 // CHECK7-NEXT:  entry:
11810 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
11811 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
11812 // CHECK7-NEXT:    [[B:%.*]] = alloca i32, align 4
11813 // CHECK7-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
11814 // CHECK7-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
11815 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
11816 // CHECK7-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
11817 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
11818 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 4
11819 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 4
11820 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 4
11821 // CHECK7-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 4
11822 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11823 // CHECK7-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
11824 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
11825 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
11826 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
11827 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
11828 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
11829 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
11830 // CHECK7-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
11831 // CHECK7-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
11832 // CHECK7-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
11833 // CHECK7-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
11834 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
11835 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
11836 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60
11837 // CHECK7-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
11838 // CHECK7-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
11839 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B]], align 4
11840 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[B_CASTED]], align 4
11841 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4
11842 // CHECK7-NEXT:    [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
11843 // CHECK7-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
11844 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8*
11845 // CHECK7-NEXT:    [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL]] to i8
11846 // CHECK7-NEXT:    store i8 [[FROMBOOL2]], i8* [[CONV]], align 1
11847 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
11848 // CHECK7-NEXT:    [[TMP9:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
11849 // CHECK7-NEXT:    [[TOBOOL3:%.*]] = trunc i8 [[TMP9]] to i1
11850 // CHECK7-NEXT:    br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
11851 // CHECK7:       omp_if.then:
11852 // CHECK7-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
11853 // CHECK7-NEXT:    [[TMP10:%.*]] = mul nuw i32 2, [[TMP1]]
11854 // CHECK7-NEXT:    [[TMP11:%.*]] = mul nuw i32 [[TMP10]], 2
11855 // CHECK7-NEXT:    [[TMP12:%.*]] = sext i32 [[TMP11]] to i64
11856 // CHECK7-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
11857 // CHECK7-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.S1**
11858 // CHECK7-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP14]], align 4
11859 // CHECK7-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
11860 // CHECK7-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
11861 // CHECK7-NEXT:    store double* [[A]], double** [[TMP16]], align 4
11862 // CHECK7-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
11863 // CHECK7-NEXT:    store i64 8, i64* [[TMP17]], align 4
11864 // CHECK7-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
11865 // CHECK7-NEXT:    store i8* null, i8** [[TMP18]], align 4
11866 // CHECK7-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
11867 // CHECK7-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
11868 // CHECK7-NEXT:    store i32 [[TMP6]], i32* [[TMP20]], align 4
11869 // CHECK7-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
11870 // CHECK7-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32*
11871 // CHECK7-NEXT:    store i32 [[TMP6]], i32* [[TMP22]], align 4
11872 // CHECK7-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
11873 // CHECK7-NEXT:    store i64 4, i64* [[TMP23]], align 4
11874 // CHECK7-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
11875 // CHECK7-NEXT:    store i8* null, i8** [[TMP24]], align 4
11876 // CHECK7-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
11877 // CHECK7-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32*
11878 // CHECK7-NEXT:    store i32 2, i32* [[TMP26]], align 4
11879 // CHECK7-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
11880 // CHECK7-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
11881 // CHECK7-NEXT:    store i32 2, i32* [[TMP28]], align 4
11882 // CHECK7-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
11883 // CHECK7-NEXT:    store i64 4, i64* [[TMP29]], align 4
11884 // CHECK7-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
11885 // CHECK7-NEXT:    store i8* null, i8** [[TMP30]], align 4
11886 // CHECK7-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
11887 // CHECK7-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32*
11888 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP32]], align 4
11889 // CHECK7-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
11890 // CHECK7-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32*
11891 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP34]], align 4
11892 // CHECK7-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
11893 // CHECK7-NEXT:    store i64 4, i64* [[TMP35]], align 4
11894 // CHECK7-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
11895 // CHECK7-NEXT:    store i8* null, i8** [[TMP36]], align 4
11896 // CHECK7-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
11897 // CHECK7-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16**
11898 // CHECK7-NEXT:    store i16* [[VLA]], i16** [[TMP38]], align 4
11899 // CHECK7-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
11900 // CHECK7-NEXT:    [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16**
11901 // CHECK7-NEXT:    store i16* [[VLA]], i16** [[TMP40]], align 4
11902 // CHECK7-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
11903 // CHECK7-NEXT:    store i64 [[TMP12]], i64* [[TMP41]], align 4
11904 // CHECK7-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
11905 // CHECK7-NEXT:    store i8* null, i8** [[TMP42]], align 4
11906 // CHECK7-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
11907 // CHECK7-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
11908 // CHECK7-NEXT:    store i32 [[TMP8]], i32* [[TMP44]], align 4
11909 // CHECK7-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
11910 // CHECK7-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32*
11911 // CHECK7-NEXT:    store i32 [[TMP8]], i32* [[TMP46]], align 4
11912 // CHECK7-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
11913 // CHECK7-NEXT:    store i64 1, i64* [[TMP47]], align 4
11914 // CHECK7-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
11915 // CHECK7-NEXT:    store i8* null, i8** [[TMP48]], align 4
11916 // CHECK7-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
11917 // CHECK7-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
11918 // CHECK7-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
11919 // CHECK7-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
11920 // CHECK7-NEXT:    [[TMP52:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11921 // CHECK7-NEXT:    [[TMP53:%.*]] = icmp ne i32 [[TMP52]], 0
11922 // CHECK7-NEXT:    br i1 [[TMP53]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
11923 // CHECK7:       omp_offload.failed:
11924 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]]
11925 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT]]
11926 // CHECK7:       omp_offload.cont:
11927 // CHECK7-NEXT:    br label [[OMP_IF_END:%.*]]
11928 // CHECK7:       omp_if.else:
11929 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]]
11930 // CHECK7-NEXT:    br label [[OMP_IF_END]]
11931 // CHECK7:       omp_if.end:
11932 // CHECK7-NEXT:    [[TMP54:%.*]] = mul nsw i32 1, [[TMP1]]
11933 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP54]]
11934 // CHECK7-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
11935 // CHECK7-NEXT:    [[TMP55:%.*]] = load i16, i16* [[ARRAYIDX4]], align 2
11936 // CHECK7-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP55]] to i32
11937 // CHECK7-NEXT:    [[TMP56:%.*]] = load i32, i32* [[B]], align 4
11938 // CHECK7-NEXT:    [[ADD6:%.*]] = add nsw i32 [[CONV5]], [[TMP56]]
11939 // CHECK7-NEXT:    [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
11940 // CHECK7-NEXT:    call void @llvm.stackrestore(i8* [[TMP57]])
11941 // CHECK7-NEXT:    ret i32 [[ADD6]]
11942 //
11943 //
11944 // CHECK7-LABEL: define {{[^@]+}}@_ZL7fstatici
11945 // CHECK7-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
11946 // CHECK7-NEXT:  entry:
11947 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
11948 // CHECK7-NEXT:    [[A:%.*]] = alloca i32, align 4
11949 // CHECK7-NEXT:    [[AA:%.*]] = alloca i16, align 2
11950 // CHECK7-NEXT:    [[AAA:%.*]] = alloca i8, align 1
11951 // CHECK7-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
11952 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
11953 // CHECK7-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
11954 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
11955 // CHECK7-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
11956 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
11957 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
11958 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
11959 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11960 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11961 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
11962 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
11963 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
11964 // CHECK7-NEXT:    store i32 0, i32* [[A]], align 4
11965 // CHECK7-NEXT:    store i16 0, i16* [[AA]], align 2
11966 // CHECK7-NEXT:    store i8 0, i8* [[AAA]], align 1
11967 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
11968 // CHECK7-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
11969 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
11970 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
11971 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
11972 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
11973 // CHECK7-NEXT:    [[TMP4:%.*]] = load i16, i16* [[AA]], align 2
11974 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
11975 // CHECK7-NEXT:    store i16 [[TMP4]], i16* [[CONV]], align 2
11976 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[AA_CASTED]], align 4
11977 // CHECK7-NEXT:    [[TMP6:%.*]] = load i8, i8* [[AAA]], align 1
11978 // CHECK7-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
11979 // CHECK7-NEXT:    store i8 [[TMP6]], i8* [[CONV1]], align 1
11980 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
11981 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[N_ADDR]], align 4
11982 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 50
11983 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
11984 // CHECK7:       omp_if.then:
11985 // CHECK7-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
11986 // CHECK7-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
11987 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP10]], align 4
11988 // CHECK7-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
11989 // CHECK7-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32*
11990 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP12]], align 4
11991 // CHECK7-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
11992 // CHECK7-NEXT:    store i8* null, i8** [[TMP13]], align 4
11993 // CHECK7-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
11994 // CHECK7-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
11995 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP15]], align 4
11996 // CHECK7-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
11997 // CHECK7-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
11998 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP17]], align 4
11999 // CHECK7-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
12000 // CHECK7-NEXT:    store i8* null, i8** [[TMP18]], align 4
12001 // CHECK7-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
12002 // CHECK7-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
12003 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[TMP20]], align 4
12004 // CHECK7-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
12005 // CHECK7-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32*
12006 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[TMP22]], align 4
12007 // CHECK7-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
12008 // CHECK7-NEXT:    store i8* null, i8** [[TMP23]], align 4
12009 // CHECK7-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
12010 // CHECK7-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
12011 // CHECK7-NEXT:    store i32 [[TMP7]], i32* [[TMP25]], align 4
12012 // CHECK7-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
12013 // CHECK7-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
12014 // CHECK7-NEXT:    store i32 [[TMP7]], i32* [[TMP27]], align 4
12015 // CHECK7-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
12016 // CHECK7-NEXT:    store i8* null, i8** [[TMP28]], align 4
12017 // CHECK7-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
12018 // CHECK7-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [10 x i32]**
12019 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP30]], align 4
12020 // CHECK7-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
12021 // CHECK7-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to [10 x i32]**
12022 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP32]], align 4
12023 // CHECK7-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
12024 // CHECK7-NEXT:    store i8* null, i8** [[TMP33]], align 4
12025 // CHECK7-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12026 // CHECK7-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12027 // CHECK7-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A]], align 4
12028 // CHECK7-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_]], align 4
12029 // CHECK7-NEXT:    [[TMP37:%.*]] = load i32, i32* [[N_ADDR]], align 4
12030 // CHECK7-NEXT:    store i32 [[TMP37]], i32* [[DOTCAPTURE_EXPR_2]], align 4
12031 // CHECK7-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12032 // CHECK7-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12033 // CHECK7-NEXT:    [[SUB:%.*]] = sub i32 [[TMP38]], [[TMP39]]
12034 // CHECK7-NEXT:    [[SUB4:%.*]] = sub i32 [[SUB]], 1
12035 // CHECK7-NEXT:    [[ADD:%.*]] = add i32 [[SUB4]], 1
12036 // CHECK7-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
12037 // CHECK7-NEXT:    [[SUB5:%.*]] = sub i32 [[DIV]], 1
12038 // CHECK7-NEXT:    store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_3]], align 4
12039 // CHECK7-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
12040 // CHECK7-NEXT:    [[ADD6:%.*]] = add i32 [[TMP40]], 1
12041 // CHECK7-NEXT:    [[TMP41:%.*]] = zext i32 [[ADD6]] to i64
12042 // CHECK7-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]])
12043 // CHECK7-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
12044 // CHECK7-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
12045 // CHECK7-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
12046 // CHECK7:       omp_offload.failed:
12047 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], i32 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
12048 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT]]
12049 // CHECK7:       omp_offload.cont:
12050 // CHECK7-NEXT:    br label [[OMP_IF_END:%.*]]
12051 // CHECK7:       omp_if.else:
12052 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], i32 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
12053 // CHECK7-NEXT:    br label [[OMP_IF_END]]
12054 // CHECK7:       omp_if.end:
12055 // CHECK7-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
12056 // CHECK7-NEXT:    ret i32 [[TMP44]]
12057 //
12058 //
12059 // CHECK7-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
12060 // CHECK7-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
12061 // CHECK7-NEXT:  entry:
12062 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12063 // CHECK7-NEXT:    [[A:%.*]] = alloca i32, align 4
12064 // CHECK7-NEXT:    [[AA:%.*]] = alloca i16, align 2
12065 // CHECK7-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
12066 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
12067 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
12068 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
12069 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
12070 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
12071 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12072 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12073 // CHECK7-NEXT:    store i32 0, i32* [[A]], align 4
12074 // CHECK7-NEXT:    store i16 0, i16* [[AA]], align 2
12075 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
12076 // CHECK7-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
12077 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
12078 // CHECK7-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
12079 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
12080 // CHECK7-NEXT:    store i16 [[TMP2]], i16* [[CONV]], align 2
12081 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
12082 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
12083 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
12084 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
12085 // CHECK7:       omp_if.then:
12086 // CHECK7-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12087 // CHECK7-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
12088 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
12089 // CHECK7-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12090 // CHECK7-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
12091 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
12092 // CHECK7-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
12093 // CHECK7-NEXT:    store i8* null, i8** [[TMP9]], align 4
12094 // CHECK7-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
12095 // CHECK7-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
12096 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP11]], align 4
12097 // CHECK7-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
12098 // CHECK7-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
12099 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
12100 // CHECK7-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
12101 // CHECK7-NEXT:    store i8* null, i8** [[TMP14]], align 4
12102 // CHECK7-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
12103 // CHECK7-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
12104 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
12105 // CHECK7-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
12106 // CHECK7-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
12107 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
12108 // CHECK7-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
12109 // CHECK7-NEXT:    store i8* null, i8** [[TMP19]], align 4
12110 // CHECK7-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12111 // CHECK7-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12112 // CHECK7-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
12113 // CHECK7-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
12114 // CHECK7-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
12115 // CHECK7-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
12116 // CHECK7:       omp_offload.failed:
12117 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
12118 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT]]
12119 // CHECK7:       omp_offload.cont:
12120 // CHECK7-NEXT:    br label [[OMP_IF_END:%.*]]
12121 // CHECK7:       omp_if.else:
12122 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
12123 // CHECK7-NEXT:    br label [[OMP_IF_END]]
12124 // CHECK7:       omp_if.end:
12125 // CHECK7-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
12126 // CHECK7-NEXT:    ret i32 [[TMP24]]
12127 //
12128 //
12129 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
12130 // CHECK7-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
12131 // CHECK7-NEXT:  entry:
12132 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
12133 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
12134 // CHECK7-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
12135 // CHECK7-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
12136 // CHECK7-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
12137 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
12138 // CHECK7-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
12139 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
12140 // CHECK7-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
12141 // CHECK7-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
12142 // CHECK7-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
12143 // CHECK7-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
12144 // CHECK7-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
12145 // CHECK7-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
12146 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
12147 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
12148 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
12149 // CHECK7-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
12150 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
12151 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
12152 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
12153 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
12154 // CHECK7-NEXT:    [[TMP6:%.*]] = load i8, i8* [[CONV]], align 1
12155 // CHECK7-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
12156 // CHECK7-NEXT:    [[CONV3:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8*
12157 // CHECK7-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
12158 // CHECK7-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV3]], align 1
12159 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
12160 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]], i32 [[TMP7]])
12161 // CHECK7-NEXT:    ret void
12162 //
12163 //
12164 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..11
12165 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
12166 // CHECK7-NEXT:  entry:
12167 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12168 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12169 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
12170 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
12171 // CHECK7-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
12172 // CHECK7-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
12173 // CHECK7-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
12174 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
12175 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12176 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12177 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12178 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12179 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12180 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12181 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
12182 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12183 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12184 // CHECK7-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
12185 // CHECK7-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
12186 // CHECK7-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
12187 // CHECK7-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
12188 // CHECK7-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
12189 // CHECK7-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
12190 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
12191 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
12192 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
12193 // CHECK7-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
12194 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
12195 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12196 // CHECK7-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
12197 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12198 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12199 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12200 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
12201 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12202 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12203 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
12204 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12205 // CHECK7:       cond.true:
12206 // CHECK7-NEXT:    br label [[COND_END:%.*]]
12207 // CHECK7:       cond.false:
12208 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12209 // CHECK7-NEXT:    br label [[COND_END]]
12210 // CHECK7:       cond.end:
12211 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
12212 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12213 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12214 // CHECK7-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
12215 // CHECK7-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV]], align 1
12216 // CHECK7-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1
12217 // CHECK7-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
12218 // CHECK7:       omp_if.then:
12219 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12220 // CHECK7:       omp.inner.for.cond:
12221 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
12222 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !40
12223 // CHECK7-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
12224 // CHECK7-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12225 // CHECK7:       omp.inner.for.body:
12226 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
12227 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
12228 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12229 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !40
12230 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !40
12231 // CHECK7-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP13]] to double
12232 // CHECK7-NEXT:    [[ADD5:%.*]] = fadd double [[CONV4]], 1.500000e+00
12233 // CHECK7-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
12234 // CHECK7-NEXT:    store double [[ADD5]], double* [[A]], align 4, !llvm.access.group !40
12235 // CHECK7-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
12236 // CHECK7-NEXT:    [[TMP14:%.*]] = load double, double* [[A6]], align 4, !llvm.access.group !40
12237 // CHECK7-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
12238 // CHECK7-NEXT:    store double [[INC]], double* [[A6]], align 4, !llvm.access.group !40
12239 // CHECK7-NEXT:    [[CONV7:%.*]] = fptosi double [[INC]] to i16
12240 // CHECK7-NEXT:    [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]]
12241 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP15]]
12242 // CHECK7-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
12243 // CHECK7-NEXT:    store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !40
12244 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12245 // CHECK7:       omp.body.continue:
12246 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12247 // CHECK7:       omp.inner.for.inc:
12248 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
12249 // CHECK7-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP16]], 1
12250 // CHECK7-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
12251 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
12252 // CHECK7:       omp.inner.for.end:
12253 // CHECK7-NEXT:    br label [[OMP_IF_END:%.*]]
12254 // CHECK7:       omp_if.else:
12255 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
12256 // CHECK7:       omp.inner.for.cond10:
12257 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12258 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12259 // CHECK7-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
12260 // CHECK7-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END26:%.*]]
12261 // CHECK7:       omp.inner.for.body12:
12262 // CHECK7-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12263 // CHECK7-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP19]], 1
12264 // CHECK7-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
12265 // CHECK7-NEXT:    store i32 [[ADD14]], i32* [[I]], align 4
12266 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32, i32* [[B_ADDR]], align 4
12267 // CHECK7-NEXT:    [[CONV15:%.*]] = sitofp i32 [[TMP20]] to double
12268 // CHECK7-NEXT:    [[ADD16:%.*]] = fadd double [[CONV15]], 1.500000e+00
12269 // CHECK7-NEXT:    [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
12270 // CHECK7-NEXT:    store double [[ADD16]], double* [[A17]], align 4
12271 // CHECK7-NEXT:    [[A18:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
12272 // CHECK7-NEXT:    [[TMP21:%.*]] = load double, double* [[A18]], align 4
12273 // CHECK7-NEXT:    [[INC19:%.*]] = fadd double [[TMP21]], 1.000000e+00
12274 // CHECK7-NEXT:    store double [[INC19]], double* [[A18]], align 4
12275 // CHECK7-NEXT:    [[CONV20:%.*]] = fptosi double [[INC19]] to i16
12276 // CHECK7-NEXT:    [[TMP22:%.*]] = mul nsw i32 1, [[TMP2]]
12277 // CHECK7-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP22]]
12278 // CHECK7-NEXT:    [[ARRAYIDX22:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX21]], i32 1
12279 // CHECK7-NEXT:    store i16 [[CONV20]], i16* [[ARRAYIDX22]], align 2
12280 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE23:%.*]]
12281 // CHECK7:       omp.body.continue23:
12282 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC24:%.*]]
12283 // CHECK7:       omp.inner.for.inc24:
12284 // CHECK7-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12285 // CHECK7-NEXT:    [[ADD25:%.*]] = add nsw i32 [[TMP23]], 1
12286 // CHECK7-NEXT:    store i32 [[ADD25]], i32* [[DOTOMP_IV]], align 4
12287 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP43:![0-9]+]]
12288 // CHECK7:       omp.inner.for.end26:
12289 // CHECK7-NEXT:    br label [[OMP_IF_END]]
12290 // CHECK7:       omp_if.end:
12291 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12292 // CHECK7:       omp.loop.exit:
12293 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
12294 // CHECK7-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12295 // CHECK7-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
12296 // CHECK7-NEXT:    br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12297 // CHECK7:       .omp.final.then:
12298 // CHECK7-NEXT:    store i32 10, i32* [[I]], align 4
12299 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12300 // CHECK7:       .omp.final.done:
12301 // CHECK7-NEXT:    ret void
12302 //
12303 //
12304 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
12305 // CHECK7-SAME: (i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
12306 // CHECK7-NEXT:  entry:
12307 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
12308 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12309 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
12310 // CHECK7-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
12311 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
12312 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
12313 // CHECK7-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
12314 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
12315 // CHECK7-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
12316 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
12317 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12318 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
12319 // CHECK7-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
12320 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
12321 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
12322 // CHECK7-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
12323 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
12324 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
12325 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
12326 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
12327 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
12328 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
12329 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
12330 // CHECK7-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV]], align 2
12331 // CHECK7-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
12332 // CHECK7-NEXT:    store i16 [[TMP5]], i16* [[CONV2]], align 2
12333 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AA_CASTED]], align 4
12334 // CHECK7-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV1]], align 1
12335 // CHECK7-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
12336 // CHECK7-NEXT:    store i8 [[TMP7]], i8* [[CONV3]], align 1
12337 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
12338 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]])
12339 // CHECK7-NEXT:    ret void
12340 //
12341 //
12342 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..13
12343 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
12344 // CHECK7-NEXT:  entry:
12345 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12346 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12347 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
12348 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12349 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
12350 // CHECK7-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
12351 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
12352 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12353 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12354 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12355 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
12356 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
12357 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
12358 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12359 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12360 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12361 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12362 // CHECK7-NEXT:    [[I6:%.*]] = alloca i32, align 4
12363 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12364 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12365 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
12366 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12367 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
12368 // CHECK7-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
12369 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
12370 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
12371 // CHECK7-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
12372 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
12373 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
12374 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
12375 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
12376 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
12377 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12378 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12379 // CHECK7-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
12380 // CHECK7-NEXT:    [[SUB4:%.*]] = sub i32 [[SUB]], 1
12381 // CHECK7-NEXT:    [[ADD:%.*]] = add i32 [[SUB4]], 1
12382 // CHECK7-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
12383 // CHECK7-NEXT:    [[SUB5:%.*]] = sub i32 [[DIV]], 1
12384 // CHECK7-NEXT:    store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_3]], align 4
12385 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12386 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
12387 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12388 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12389 // CHECK7-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
12390 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12391 // CHECK7:       omp.precond.then:
12392 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12393 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
12394 // CHECK7-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
12395 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12396 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12397 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12398 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
12399 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12400 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12401 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
12402 // CHECK7-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
12403 // CHECK7-NEXT:    br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12404 // CHECK7:       cond.true:
12405 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
12406 // CHECK7-NEXT:    br label [[COND_END:%.*]]
12407 // CHECK7:       cond.false:
12408 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12409 // CHECK7-NEXT:    br label [[COND_END]]
12410 // CHECK7:       cond.end:
12411 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
12412 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12413 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12414 // CHECK7-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
12415 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12416 // CHECK7:       omp.inner.for.cond:
12417 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
12418 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !45
12419 // CHECK7-NEXT:    [[ADD8:%.*]] = add i32 [[TMP17]], 1
12420 // CHECK7-NEXT:    [[CMP9:%.*]] = icmp ult i32 [[TMP16]], [[ADD8]]
12421 // CHECK7-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12422 // CHECK7:       omp.inner.for.body:
12423 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !45
12424 // CHECK7-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
12425 // CHECK7-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
12426 // CHECK7-NEXT:    [[ADD10:%.*]] = add i32 [[TMP18]], [[MUL]]
12427 // CHECK7-NEXT:    store i32 [[ADD10]], i32* [[I6]], align 4, !llvm.access.group !45
12428 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !45
12429 // CHECK7-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP20]], 1
12430 // CHECK7-NEXT:    store i32 [[ADD11]], i32* [[A_ADDR]], align 4, !llvm.access.group !45
12431 // CHECK7-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !45
12432 // CHECK7-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP21]] to i32
12433 // CHECK7-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
12434 // CHECK7-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
12435 // CHECK7-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !45
12436 // CHECK7-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV1]], align 1, !llvm.access.group !45
12437 // CHECK7-NEXT:    [[CONV15:%.*]] = sext i8 [[TMP22]] to i32
12438 // CHECK7-NEXT:    [[ADD16:%.*]] = add nsw i32 [[CONV15]], 1
12439 // CHECK7-NEXT:    [[CONV17:%.*]] = trunc i32 [[ADD16]] to i8
12440 // CHECK7-NEXT:    store i8 [[CONV17]], i8* [[CONV1]], align 1, !llvm.access.group !45
12441 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
12442 // CHECK7-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !45
12443 // CHECK7-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP23]], 1
12444 // CHECK7-NEXT:    store i32 [[ADD18]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !45
12445 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12446 // CHECK7:       omp.body.continue:
12447 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12448 // CHECK7:       omp.inner.for.inc:
12449 // CHECK7-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
12450 // CHECK7-NEXT:    [[ADD19:%.*]] = add i32 [[TMP24]], 1
12451 // CHECK7-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
12452 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
12453 // CHECK7:       omp.inner.for.end:
12454 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12455 // CHECK7:       omp.loop.exit:
12456 // CHECK7-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12457 // CHECK7-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
12458 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
12459 // CHECK7-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12460 // CHECK7-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
12461 // CHECK7-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12462 // CHECK7:       .omp.final.then:
12463 // CHECK7-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12464 // CHECK7-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12465 // CHECK7-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12466 // CHECK7-NEXT:    [[SUB20:%.*]] = sub i32 [[TMP30]], [[TMP31]]
12467 // CHECK7-NEXT:    [[SUB21:%.*]] = sub i32 [[SUB20]], 1
12468 // CHECK7-NEXT:    [[ADD22:%.*]] = add i32 [[SUB21]], 1
12469 // CHECK7-NEXT:    [[DIV23:%.*]] = udiv i32 [[ADD22]], 1
12470 // CHECK7-NEXT:    [[MUL24:%.*]] = mul i32 [[DIV23]], 1
12471 // CHECK7-NEXT:    [[ADD25:%.*]] = add i32 [[TMP29]], [[MUL24]]
12472 // CHECK7-NEXT:    store i32 [[ADD25]], i32* [[I6]], align 4
12473 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12474 // CHECK7:       .omp.final.done:
12475 // CHECK7-NEXT:    br label [[OMP_PRECOND_END]]
12476 // CHECK7:       omp.precond.end:
12477 // CHECK7-NEXT:    ret void
12478 //
12479 //
12480 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
12481 // CHECK7-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
12482 // CHECK7-NEXT:  entry:
12483 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
12484 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
12485 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
12486 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
12487 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
12488 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
12489 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
12490 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
12491 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
12492 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
12493 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
12494 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
12495 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
12496 // CHECK7-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
12497 // CHECK7-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
12498 // CHECK7-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
12499 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
12500 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
12501 // CHECK7-NEXT:    ret void
12502 //
12503 //
12504 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..16
12505 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
12506 // CHECK7-NEXT:  entry:
12507 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12508 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12509 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
12510 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
12511 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
12512 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12513 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12514 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12515 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12516 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12517 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12518 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
12519 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12520 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12521 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
12522 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
12523 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
12524 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
12525 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
12526 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12527 // CHECK7-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
12528 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12529 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12530 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12531 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
12532 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12533 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12534 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
12535 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12536 // CHECK7:       cond.true:
12537 // CHECK7-NEXT:    br label [[COND_END:%.*]]
12538 // CHECK7:       cond.false:
12539 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12540 // CHECK7-NEXT:    br label [[COND_END]]
12541 // CHECK7:       cond.end:
12542 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
12543 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12544 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12545 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
12546 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12547 // CHECK7:       omp.inner.for.cond:
12548 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
12549 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !48
12550 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
12551 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12552 // CHECK7:       omp.inner.for.body:
12553 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
12554 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
12555 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12556 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !48
12557 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !48
12558 // CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
12559 // CHECK7-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !48
12560 // CHECK7-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !48
12561 // CHECK7-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
12562 // CHECK7-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
12563 // CHECK7-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
12564 // CHECK7-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !48
12565 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
12566 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !48
12567 // CHECK7-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
12568 // CHECK7-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !48
12569 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12570 // CHECK7:       omp.body.continue:
12571 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12572 // CHECK7:       omp.inner.for.inc:
12573 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
12574 // CHECK7-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP12]], 1
12575 // CHECK7-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
12576 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP49:![0-9]+]]
12577 // CHECK7:       omp.inner.for.end:
12578 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12579 // CHECK7:       omp.loop.exit:
12580 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
12581 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12582 // CHECK7-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
12583 // CHECK7-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12584 // CHECK7:       .omp.final.then:
12585 // CHECK7-NEXT:    store i32 10, i32* [[I]], align 4
12586 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12587 // CHECK7:       .omp.final.done:
12588 // CHECK7-NEXT:    ret void
12589 //
12590 //
12591 // CHECK7-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
12592 // CHECK7-SAME: () #[[ATTR5]] {
12593 // CHECK7-NEXT:  entry:
12594 // CHECK7-NEXT:    call void @__tgt_register_requires(i64 1)
12595 // CHECK7-NEXT:    ret void
12596 //
12597 //
12598 // CHECK8-LABEL: define {{[^@]+}}@_Z3fooi
12599 // CHECK8-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
12600 // CHECK8-NEXT:  entry:
12601 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12602 // CHECK8-NEXT:    [[A:%.*]] = alloca i32, align 4
12603 // CHECK8-NEXT:    [[AA:%.*]] = alloca i16, align 2
12604 // CHECK8-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
12605 // CHECK8-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
12606 // CHECK8-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
12607 // CHECK8-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
12608 // CHECK8-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
12609 // CHECK8-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
12610 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12611 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
12612 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
12613 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
12614 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__CASTED3:%.*]] = alloca i32, align 4
12615 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
12616 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
12617 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
12618 // CHECK8-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
12619 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
12620 // CHECK8-NEXT:    [[AA_CASTED4:%.*]] = alloca i32, align 4
12621 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [1 x i8*], align 4
12622 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS7:%.*]] = alloca [1 x i8*], align 4
12623 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [1 x i8*], align 4
12624 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12625 // CHECK8-NEXT:    [[A_CASTED9:%.*]] = alloca i32, align 4
12626 // CHECK8-NEXT:    [[AA_CASTED10:%.*]] = alloca i32, align 4
12627 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS12:%.*]] = alloca [2 x i8*], align 4
12628 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS13:%.*]] = alloca [2 x i8*], align 4
12629 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS14:%.*]] = alloca [2 x i8*], align 4
12630 // CHECK8-NEXT:    [[_TMP15:%.*]] = alloca i32, align 4
12631 // CHECK8-NEXT:    [[A_CASTED18:%.*]] = alloca i32, align 4
12632 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [9 x i8*], align 4
12633 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS22:%.*]] = alloca [9 x i8*], align 4
12634 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [9 x i8*], align 4
12635 // CHECK8-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4
12636 // CHECK8-NEXT:    [[_TMP24:%.*]] = alloca i32, align 4
12637 // CHECK8-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
12638 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12639 // CHECK8-NEXT:    store i32 0, i32* [[A]], align 4
12640 // CHECK8-NEXT:    store i16 0, i16* [[AA]], align 2
12641 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
12642 // CHECK8-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
12643 // CHECK8-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
12644 // CHECK8-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
12645 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
12646 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
12647 // CHECK8-NEXT:    [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
12648 // CHECK8-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
12649 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
12650 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
12651 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
12652 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[A]], align 4
12653 // CHECK8-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_2]], align 4
12654 // CHECK8-NEXT:    [[TMP7:%.*]] = load i16, i16* [[AA]], align 2
12655 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
12656 // CHECK8-NEXT:    store i16 [[TMP7]], i16* [[CONV]], align 2
12657 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[AA_CASTED]], align 4
12658 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12659 // CHECK8-NEXT:    store i32 [[TMP9]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
12660 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
12661 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12662 // CHECK8-NEXT:    store i32 [[TMP11]], i32* [[DOTCAPTURE_EXPR__CASTED3]], align 4
12663 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED3]], align 4
12664 // CHECK8-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12665 // CHECK8-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32*
12666 // CHECK8-NEXT:    store i32 [[TMP8]], i32* [[TMP14]], align 4
12667 // CHECK8-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12668 // CHECK8-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32*
12669 // CHECK8-NEXT:    store i32 [[TMP8]], i32* [[TMP16]], align 4
12670 // CHECK8-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
12671 // CHECK8-NEXT:    store i8* null, i8** [[TMP17]], align 4
12672 // CHECK8-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
12673 // CHECK8-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
12674 // CHECK8-NEXT:    store i32 [[TMP10]], i32* [[TMP19]], align 4
12675 // CHECK8-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
12676 // CHECK8-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32*
12677 // CHECK8-NEXT:    store i32 [[TMP10]], i32* [[TMP21]], align 4
12678 // CHECK8-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
12679 // CHECK8-NEXT:    store i8* null, i8** [[TMP22]], align 4
12680 // CHECK8-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
12681 // CHECK8-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32*
12682 // CHECK8-NEXT:    store i32 [[TMP12]], i32* [[TMP24]], align 4
12683 // CHECK8-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
12684 // CHECK8-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32*
12685 // CHECK8-NEXT:    store i32 [[TMP12]], i32* [[TMP26]], align 4
12686 // CHECK8-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
12687 // CHECK8-NEXT:    store i8* null, i8** [[TMP27]], align 4
12688 // CHECK8-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12689 // CHECK8-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12690 // CHECK8-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
12691 // CHECK8-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2
12692 // CHECK8-NEXT:    store i16 [[TMP31]], i16* [[TMP30]], align 4
12693 // CHECK8-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
12694 // CHECK8-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12695 // CHECK8-NEXT:    store i32 [[TMP33]], i32* [[TMP32]], align 4
12696 // CHECK8-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
12697 // CHECK8-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
12698 // CHECK8-NEXT:    store i32 [[TMP35]], i32* [[TMP34]], align 4
12699 // CHECK8-NEXT:    [[TMP36:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 72, i32 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
12700 // CHECK8-NEXT:    [[TMP37:%.*]] = bitcast i8* [[TMP36]] to %struct.kmp_task_t_with_privates*
12701 // CHECK8-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP37]], i32 0, i32 0
12702 // CHECK8-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP38]], i32 0, i32 0
12703 // CHECK8-NEXT:    [[TMP40:%.*]] = load i8*, i8** [[TMP39]], align 4
12704 // CHECK8-NEXT:    [[TMP41:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
12705 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP40]], i8* align 4 [[TMP41]], i32 12, i1 false)
12706 // CHECK8-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP37]], i32 0, i32 1
12707 // CHECK8-NEXT:    [[TMP43:%.*]] = bitcast i8* [[TMP40]] to %struct.anon*
12708 // CHECK8-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 0
12709 // CHECK8-NEXT:    [[TMP45:%.*]] = bitcast [3 x i64]* [[TMP44]] to i8*
12710 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP45]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes to i8*), i32 24, i1 false)
12711 // CHECK8-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 1
12712 // CHECK8-NEXT:    [[TMP47:%.*]] = bitcast [3 x i8*]* [[TMP46]] to i8*
12713 // CHECK8-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP28]] to i8*
12714 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP47]], i8* align 4 [[TMP48]], i32 12, i1 false)
12715 // CHECK8-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 2
12716 // CHECK8-NEXT:    [[TMP50:%.*]] = bitcast [3 x i8*]* [[TMP49]] to i8*
12717 // CHECK8-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP29]] to i8*
12718 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP50]], i8* align 4 [[TMP51]], i32 12, i1 false)
12719 // CHECK8-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP42]], i32 0, i32 3
12720 // CHECK8-NEXT:    [[TMP53:%.*]] = load i16, i16* [[AA]], align 2
12721 // CHECK8-NEXT:    store i16 [[TMP53]], i16* [[TMP52]], align 4
12722 // CHECK8-NEXT:    [[TMP54:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP36]])
12723 // CHECK8-NEXT:    [[TMP55:%.*]] = load i32, i32* [[A]], align 4
12724 // CHECK8-NEXT:    store i32 [[TMP55]], i32* [[A_CASTED]], align 4
12725 // CHECK8-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A_CASTED]], align 4
12726 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l102(i32 [[TMP56]]) #[[ATTR4:[0-9]+]]
12727 // CHECK8-NEXT:    [[TMP57:%.*]] = load i16, i16* [[AA]], align 2
12728 // CHECK8-NEXT:    [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
12729 // CHECK8-NEXT:    store i16 [[TMP57]], i16* [[CONV5]], align 2
12730 // CHECK8-NEXT:    [[TMP58:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
12731 // CHECK8-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
12732 // CHECK8-NEXT:    [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i32*
12733 // CHECK8-NEXT:    store i32 [[TMP58]], i32* [[TMP60]], align 4
12734 // CHECK8-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
12735 // CHECK8-NEXT:    [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32*
12736 // CHECK8-NEXT:    store i32 [[TMP58]], i32* [[TMP62]], align 4
12737 // CHECK8-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
12738 // CHECK8-NEXT:    store i8* null, i8** [[TMP63]], align 4
12739 // CHECK8-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
12740 // CHECK8-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
12741 // CHECK8-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
12742 // CHECK8-NEXT:    [[TMP66:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111.region_id, i32 1, i8** [[TMP64]], i8** [[TMP65]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
12743 // CHECK8-NEXT:    [[TMP67:%.*]] = icmp ne i32 [[TMP66]], 0
12744 // CHECK8-NEXT:    br i1 [[TMP67]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
12745 // CHECK8:       omp_offload.failed:
12746 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111(i32 [[TMP58]]) #[[ATTR4]]
12747 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT]]
12748 // CHECK8:       omp_offload.cont:
12749 // CHECK8-NEXT:    [[TMP68:%.*]] = load i32, i32* [[A]], align 4
12750 // CHECK8-NEXT:    store i32 [[TMP68]], i32* [[A_CASTED9]], align 4
12751 // CHECK8-NEXT:    [[TMP69:%.*]] = load i32, i32* [[A_CASTED9]], align 4
12752 // CHECK8-NEXT:    [[TMP70:%.*]] = load i16, i16* [[AA]], align 2
12753 // CHECK8-NEXT:    [[CONV11:%.*]] = bitcast i32* [[AA_CASTED10]] to i16*
12754 // CHECK8-NEXT:    store i16 [[TMP70]], i16* [[CONV11]], align 2
12755 // CHECK8-NEXT:    [[TMP71:%.*]] = load i32, i32* [[AA_CASTED10]], align 4
12756 // CHECK8-NEXT:    [[TMP72:%.*]] = load i32, i32* [[N_ADDR]], align 4
12757 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP72]], 10
12758 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
12759 // CHECK8:       omp_if.then:
12760 // CHECK8-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 0
12761 // CHECK8-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
12762 // CHECK8-NEXT:    store i32 [[TMP69]], i32* [[TMP74]], align 4
12763 // CHECK8-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 0
12764 // CHECK8-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32*
12765 // CHECK8-NEXT:    store i32 [[TMP69]], i32* [[TMP76]], align 4
12766 // CHECK8-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS14]], i32 0, i32 0
12767 // CHECK8-NEXT:    store i8* null, i8** [[TMP77]], align 4
12768 // CHECK8-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 1
12769 // CHECK8-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
12770 // CHECK8-NEXT:    store i32 [[TMP71]], i32* [[TMP79]], align 4
12771 // CHECK8-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 1
12772 // CHECK8-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
12773 // CHECK8-NEXT:    store i32 [[TMP71]], i32* [[TMP81]], align 4
12774 // CHECK8-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS14]], i32 0, i32 1
12775 // CHECK8-NEXT:    store i8* null, i8** [[TMP82]], align 4
12776 // CHECK8-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS12]], i32 0, i32 0
12777 // CHECK8-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS13]], i32 0, i32 0
12778 // CHECK8-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
12779 // CHECK8-NEXT:    [[TMP85:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118.region_id, i32 2, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.7, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
12780 // CHECK8-NEXT:    [[TMP86:%.*]] = icmp ne i32 [[TMP85]], 0
12781 // CHECK8-NEXT:    br i1 [[TMP86]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
12782 // CHECK8:       omp_offload.failed16:
12783 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i32 [[TMP69]], i32 [[TMP71]]) #[[ATTR4]]
12784 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT17]]
12785 // CHECK8:       omp_offload.cont17:
12786 // CHECK8-NEXT:    br label [[OMP_IF_END:%.*]]
12787 // CHECK8:       omp_if.else:
12788 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i32 [[TMP69]], i32 [[TMP71]]) #[[ATTR4]]
12789 // CHECK8-NEXT:    br label [[OMP_IF_END]]
12790 // CHECK8:       omp_if.end:
12791 // CHECK8-NEXT:    [[TMP87:%.*]] = load i32, i32* [[A]], align 4
12792 // CHECK8-NEXT:    store i32 [[TMP87]], i32* [[A_CASTED18]], align 4
12793 // CHECK8-NEXT:    [[TMP88:%.*]] = load i32, i32* [[A_CASTED18]], align 4
12794 // CHECK8-NEXT:    [[TMP89:%.*]] = load i32, i32* [[N_ADDR]], align 4
12795 // CHECK8-NEXT:    [[CMP19:%.*]] = icmp sgt i32 [[TMP89]], 20
12796 // CHECK8-NEXT:    br i1 [[CMP19]], label [[OMP_IF_THEN20:%.*]], label [[OMP_IF_ELSE27:%.*]]
12797 // CHECK8:       omp_if.then20:
12798 // CHECK8-NEXT:    [[TMP90:%.*]] = mul nuw i32 [[TMP1]], 4
12799 // CHECK8-NEXT:    [[TMP91:%.*]] = sext i32 [[TMP90]] to i64
12800 // CHECK8-NEXT:    [[TMP92:%.*]] = mul nuw i32 5, [[TMP3]]
12801 // CHECK8-NEXT:    [[TMP93:%.*]] = mul nuw i32 [[TMP92]], 8
12802 // CHECK8-NEXT:    [[TMP94:%.*]] = sext i32 [[TMP93]] to i64
12803 // CHECK8-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
12804 // CHECK8-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32*
12805 // CHECK8-NEXT:    store i32 [[TMP88]], i32* [[TMP96]], align 4
12806 // CHECK8-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
12807 // CHECK8-NEXT:    [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32*
12808 // CHECK8-NEXT:    store i32 [[TMP88]], i32* [[TMP98]], align 4
12809 // CHECK8-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
12810 // CHECK8-NEXT:    store i64 4, i64* [[TMP99]], align 4
12811 // CHECK8-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0
12812 // CHECK8-NEXT:    store i8* null, i8** [[TMP100]], align 4
12813 // CHECK8-NEXT:    [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
12814 // CHECK8-NEXT:    [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]**
12815 // CHECK8-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 4
12816 // CHECK8-NEXT:    [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
12817 // CHECK8-NEXT:    [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]**
12818 // CHECK8-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4
12819 // CHECK8-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
12820 // CHECK8-NEXT:    store i64 40, i64* [[TMP105]], align 4
12821 // CHECK8-NEXT:    [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1
12822 // CHECK8-NEXT:    store i8* null, i8** [[TMP106]], align 4
12823 // CHECK8-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2
12824 // CHECK8-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32*
12825 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP108]], align 4
12826 // CHECK8-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2
12827 // CHECK8-NEXT:    [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32*
12828 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP110]], align 4
12829 // CHECK8-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
12830 // CHECK8-NEXT:    store i64 4, i64* [[TMP111]], align 4
12831 // CHECK8-NEXT:    [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 2
12832 // CHECK8-NEXT:    store i8* null, i8** [[TMP112]], align 4
12833 // CHECK8-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3
12834 // CHECK8-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float**
12835 // CHECK8-NEXT:    store float* [[VLA]], float** [[TMP114]], align 4
12836 // CHECK8-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3
12837 // CHECK8-NEXT:    [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float**
12838 // CHECK8-NEXT:    store float* [[VLA]], float** [[TMP116]], align 4
12839 // CHECK8-NEXT:    [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
12840 // CHECK8-NEXT:    store i64 [[TMP91]], i64* [[TMP117]], align 4
12841 // CHECK8-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 3
12842 // CHECK8-NEXT:    store i8* null, i8** [[TMP118]], align 4
12843 // CHECK8-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4
12844 // CHECK8-NEXT:    [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]**
12845 // CHECK8-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 4
12846 // CHECK8-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4
12847 // CHECK8-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]**
12848 // CHECK8-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 4
12849 // CHECK8-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
12850 // CHECK8-NEXT:    store i64 400, i64* [[TMP123]], align 4
12851 // CHECK8-NEXT:    [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 4
12852 // CHECK8-NEXT:    store i8* null, i8** [[TMP124]], align 4
12853 // CHECK8-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 5
12854 // CHECK8-NEXT:    [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32*
12855 // CHECK8-NEXT:    store i32 5, i32* [[TMP126]], align 4
12856 // CHECK8-NEXT:    [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 5
12857 // CHECK8-NEXT:    [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32*
12858 // CHECK8-NEXT:    store i32 5, i32* [[TMP128]], align 4
12859 // CHECK8-NEXT:    [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
12860 // CHECK8-NEXT:    store i64 4, i64* [[TMP129]], align 4
12861 // CHECK8-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 5
12862 // CHECK8-NEXT:    store i8* null, i8** [[TMP130]], align 4
12863 // CHECK8-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 6
12864 // CHECK8-NEXT:    [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32*
12865 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP132]], align 4
12866 // CHECK8-NEXT:    [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 6
12867 // CHECK8-NEXT:    [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32*
12868 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP134]], align 4
12869 // CHECK8-NEXT:    [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
12870 // CHECK8-NEXT:    store i64 4, i64* [[TMP135]], align 4
12871 // CHECK8-NEXT:    [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 6
12872 // CHECK8-NEXT:    store i8* null, i8** [[TMP136]], align 4
12873 // CHECK8-NEXT:    [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 7
12874 // CHECK8-NEXT:    [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double**
12875 // CHECK8-NEXT:    store double* [[VLA1]], double** [[TMP138]], align 4
12876 // CHECK8-NEXT:    [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 7
12877 // CHECK8-NEXT:    [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double**
12878 // CHECK8-NEXT:    store double* [[VLA1]], double** [[TMP140]], align 4
12879 // CHECK8-NEXT:    [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
12880 // CHECK8-NEXT:    store i64 [[TMP94]], i64* [[TMP141]], align 4
12881 // CHECK8-NEXT:    [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 7
12882 // CHECK8-NEXT:    store i8* null, i8** [[TMP142]], align 4
12883 // CHECK8-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 8
12884 // CHECK8-NEXT:    [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT**
12885 // CHECK8-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 4
12886 // CHECK8-NEXT:    [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 8
12887 // CHECK8-NEXT:    [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT**
12888 // CHECK8-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 4
12889 // CHECK8-NEXT:    [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
12890 // CHECK8-NEXT:    store i64 12, i64* [[TMP147]], align 4
12891 // CHECK8-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 8
12892 // CHECK8-NEXT:    store i8* null, i8** [[TMP148]], align 4
12893 // CHECK8-NEXT:    [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
12894 // CHECK8-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
12895 // CHECK8-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
12896 // CHECK8-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
12897 // CHECK8-NEXT:    [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
12898 // CHECK8-NEXT:    [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0
12899 // CHECK8-NEXT:    br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]]
12900 // CHECK8:       omp_offload.failed25:
12901 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
12902 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT26]]
12903 // CHECK8:       omp_offload.cont26:
12904 // CHECK8-NEXT:    br label [[OMP_IF_END28:%.*]]
12905 // CHECK8:       omp_if.else27:
12906 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]]
12907 // CHECK8-NEXT:    br label [[OMP_IF_END28]]
12908 // CHECK8:       omp_if.end28:
12909 // CHECK8-NEXT:    [[TMP154:%.*]] = load i32, i32* [[A]], align 4
12910 // CHECK8-NEXT:    [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
12911 // CHECK8-NEXT:    call void @llvm.stackrestore(i8* [[TMP155]])
12912 // CHECK8-NEXT:    ret i32 [[TMP154]]
12913 //
12914 //
12915 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
12916 // CHECK8-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], i32 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR2:[0-9]+]] {
12917 // CHECK8-NEXT:  entry:
12918 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
12919 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
12920 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4
12921 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
12922 // CHECK8-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
12923 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
12924 // CHECK8-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
12925 // CHECK8-NEXT:    store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
12926 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
12927 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
12928 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
12929 // CHECK8-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
12930 // CHECK8-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
12931 // CHECK8-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
12932 // CHECK8-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
12933 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
12934 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP4]])
12935 // CHECK8-NEXT:    ret void
12936 //
12937 //
12938 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined.
12939 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3:[0-9]+]] {
12940 // CHECK8-NEXT:  entry:
12941 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12942 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12943 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
12944 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12945 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12946 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12947 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12948 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12949 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12950 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
12951 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12952 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12953 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
12954 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
12955 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12956 // CHECK8-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
12957 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12958 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12959 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12960 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
12961 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12962 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12963 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
12964 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12965 // CHECK8:       cond.true:
12966 // CHECK8-NEXT:    br label [[COND_END:%.*]]
12967 // CHECK8:       cond.false:
12968 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12969 // CHECK8-NEXT:    br label [[COND_END]]
12970 // CHECK8:       cond.end:
12971 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
12972 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12973 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12974 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
12975 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12976 // CHECK8:       omp.inner.for.cond:
12977 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
12978 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
12979 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
12980 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12981 // CHECK8:       omp.inner.for.body:
12982 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
12983 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
12984 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12985 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
12986 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12987 // CHECK8:       omp.body.continue:
12988 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12989 // CHECK8:       omp.inner.for.inc:
12990 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
12991 // CHECK8-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
12992 // CHECK8-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
12993 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
12994 // CHECK8:       omp.inner.for.end:
12995 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12996 // CHECK8:       omp.loop.exit:
12997 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
12998 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12999 // CHECK8-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
13000 // CHECK8-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13001 // CHECK8:       .omp.final.then:
13002 // CHECK8-NEXT:    store i32 10, i32* [[I]], align 4
13003 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13004 // CHECK8:       .omp.final.done:
13005 // CHECK8-NEXT:    ret void
13006 //
13007 //
13008 // CHECK8-LABEL: define {{[^@]+}}@.omp_task_privates_map.
13009 // CHECK8-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], i16** noalias noundef [[TMP1:%.*]], [3 x i8*]** noalias noundef [[TMP2:%.*]], [3 x i8*]** noalias noundef [[TMP3:%.*]], [3 x i64]** noalias noundef [[TMP4:%.*]]) #[[ATTR5:[0-9]+]] {
13010 // CHECK8-NEXT:  entry:
13011 // CHECK8-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 4
13012 // CHECK8-NEXT:    [[DOTADDR1:%.*]] = alloca i16**, align 4
13013 // CHECK8-NEXT:    [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 4
13014 // CHECK8-NEXT:    [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 4
13015 // CHECK8-NEXT:    [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 4
13016 // CHECK8-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 4
13017 // CHECK8-NEXT:    store i16** [[TMP1]], i16*** [[DOTADDR1]], align 4
13018 // CHECK8-NEXT:    store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 4
13019 // CHECK8-NEXT:    store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 4
13020 // CHECK8-NEXT:    store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 4
13021 // CHECK8-NEXT:    [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 4
13022 // CHECK8-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0
13023 // CHECK8-NEXT:    [[TMP7:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 4
13024 // CHECK8-NEXT:    store [3 x i64]* [[TMP6]], [3 x i64]** [[TMP7]], align 4
13025 // CHECK8-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1
13026 // CHECK8-NEXT:    [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 4
13027 // CHECK8-NEXT:    store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 4
13028 // CHECK8-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2
13029 // CHECK8-NEXT:    [[TMP11:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 4
13030 // CHECK8-NEXT:    store [3 x i8*]* [[TMP10]], [3 x i8*]** [[TMP11]], align 4
13031 // CHECK8-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3
13032 // CHECK8-NEXT:    [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 4
13033 // CHECK8-NEXT:    store i16* [[TMP12]], i16** [[TMP13]], align 4
13034 // CHECK8-NEXT:    ret void
13035 //
13036 //
13037 // CHECK8-LABEL: define {{[^@]+}}@.omp_task_entry.
13038 // CHECK8-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR6:[0-9]+]] {
13039 // CHECK8-NEXT:  entry:
13040 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
13041 // CHECK8-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
13042 // CHECK8-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
13043 // CHECK8-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
13044 // CHECK8-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
13045 // CHECK8-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
13046 // CHECK8-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 4
13047 // CHECK8-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 4
13048 // CHECK8-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 4
13049 // CHECK8-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 4
13050 // CHECK8-NEXT:    [[AA_CASTED_I:%.*]] = alloca i32, align 4
13051 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__CASTED_I:%.*]] = alloca i32, align 4
13052 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__CASTED4_I:%.*]] = alloca i32, align 4
13053 // CHECK8-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
13054 // CHECK8-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
13055 // CHECK8-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
13056 // CHECK8-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
13057 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
13058 // CHECK8-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
13059 // CHECK8-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
13060 // CHECK8-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
13061 // CHECK8-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
13062 // CHECK8-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
13063 // CHECK8-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
13064 // CHECK8-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
13065 // CHECK8-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
13066 // CHECK8-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
13067 // CHECK8-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]])
13068 // CHECK8-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
13069 // CHECK8-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
13070 // CHECK8-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]])
13071 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !27
13072 // CHECK8-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !27
13073 // CHECK8-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27
13074 // CHECK8-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27
13075 // CHECK8-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !27
13076 // CHECK8-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !27
13077 // CHECK8-NEXT:    [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !27
13078 // CHECK8-NEXT:    [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27
13079 // CHECK8-NEXT:    [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27
13080 // CHECK8-NEXT:    [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)*
13081 // CHECK8-NEXT:    call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
13082 // CHECK8-NEXT:    [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !27
13083 // CHECK8-NEXT:    [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !27
13084 // CHECK8-NEXT:    [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !27
13085 // CHECK8-NEXT:    [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !27
13086 // CHECK8-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i32 0, i32 0
13087 // CHECK8-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i32 0, i32 0
13088 // CHECK8-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i32 0, i32 0
13089 // CHECK8-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
13090 // CHECK8-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
13091 // CHECK8-NEXT:    [[TMP25:%.*]] = load i32, i32* [[TMP23]], align 4
13092 // CHECK8-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) #[[ATTR4]]
13093 // CHECK8-NEXT:    [[TMP26:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 [[TMP25]], i32 1, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
13094 // CHECK8-NEXT:    [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
13095 // CHECK8-NEXT:    br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
13096 // CHECK8:       omp_offload.failed.i:
13097 // CHECK8-NEXT:    [[TMP28:%.*]] = load i16, i16* [[TMP16]], align 2
13098 // CHECK8-NEXT:    [[CONV_I:%.*]] = bitcast i32* [[AA_CASTED_I]] to i16*
13099 // CHECK8-NEXT:    store i16 [[TMP28]], i16* [[CONV_I]], align 2, !noalias !27
13100 // CHECK8-NEXT:    [[TMP29:%.*]] = load i32, i32* [[AA_CASTED_I]], align 4, !noalias !27
13101 // CHECK8-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP23]], align 4
13102 // CHECK8-NEXT:    store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !27
13103 // CHECK8-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !27
13104 // CHECK8-NEXT:    [[TMP32:%.*]] = load i32, i32* [[TMP24]], align 4
13105 // CHECK8-NEXT:    store i32 [[TMP32]], i32* [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !27
13106 // CHECK8-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !27
13107 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97(i32 [[TMP29]], i32 [[TMP31]], i32 [[TMP33]]) #[[ATTR4]]
13108 // CHECK8-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
13109 // CHECK8:       .omp_outlined..1.exit:
13110 // CHECK8-NEXT:    ret i32 0
13111 //
13112 //
13113 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l102
13114 // CHECK8-SAME: (i32 noundef [[A:%.*]]) #[[ATTR3]] {
13115 // CHECK8-NEXT:  entry:
13116 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
13117 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
13118 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
13119 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
13120 // CHECK8-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
13121 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
13122 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]])
13123 // CHECK8-NEXT:    ret void
13124 //
13125 //
13126 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..2
13127 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
13128 // CHECK8-NEXT:  entry:
13129 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13130 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13131 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
13132 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13133 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13134 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13135 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13136 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13137 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13138 // CHECK8-NEXT:    [[A1:%.*]] = alloca i32, align 4
13139 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13140 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13141 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
13142 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13143 // CHECK8-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
13144 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13145 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13146 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13147 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
13148 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13149 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13150 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
13151 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13152 // CHECK8:       cond.true:
13153 // CHECK8-NEXT:    br label [[COND_END:%.*]]
13154 // CHECK8:       cond.false:
13155 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13156 // CHECK8-NEXT:    br label [[COND_END]]
13157 // CHECK8:       cond.end:
13158 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
13159 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
13160 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13161 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
13162 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13163 // CHECK8:       omp.inner.for.cond:
13164 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13165 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13166 // CHECK8-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
13167 // CHECK8-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13168 // CHECK8:       omp.inner.for.body:
13169 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13170 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
13171 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13172 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[A1]], align 4, !nontemporal !28
13173 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A1]], align 4, !nontemporal !28
13174 // CHECK8-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
13175 // CHECK8-NEXT:    store i32 [[ADD3]], i32* [[A1]], align 4, !nontemporal !28
13176 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13177 // CHECK8:       omp.body.continue:
13178 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13179 // CHECK8:       omp.inner.for.inc:
13180 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13181 // CHECK8-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1
13182 // CHECK8-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
13183 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
13184 // CHECK8:       omp.inner.for.end:
13185 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13186 // CHECK8:       omp.loop.exit:
13187 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
13188 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13189 // CHECK8-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
13190 // CHECK8-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13191 // CHECK8:       .omp.final.then:
13192 // CHECK8-NEXT:    store i32 10, i32* [[A_ADDR]], align 4
13193 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13194 // CHECK8:       .omp.final.done:
13195 // CHECK8-NEXT:    ret void
13196 //
13197 //
13198 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
13199 // CHECK8-SAME: (i32 noundef [[AA:%.*]]) #[[ATTR2]] {
13200 // CHECK8-NEXT:  entry:
13201 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
13202 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
13203 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
13204 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
13205 // CHECK8-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
13206 // CHECK8-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
13207 // CHECK8-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
13208 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
13209 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]])
13210 // CHECK8-NEXT:    ret void
13211 //
13212 //
13213 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..3
13214 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3]] {
13215 // CHECK8-NEXT:  entry:
13216 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13217 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13218 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
13219 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13220 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13221 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13222 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13223 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13224 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13225 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
13226 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13227 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13228 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
13229 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
13230 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13231 // CHECK8-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
13232 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13233 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13234 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13235 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
13236 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13237 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13238 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
13239 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13240 // CHECK8:       cond.true:
13241 // CHECK8-NEXT:    br label [[COND_END:%.*]]
13242 // CHECK8:       cond.false:
13243 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13244 // CHECK8-NEXT:    br label [[COND_END]]
13245 // CHECK8:       cond.end:
13246 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
13247 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
13248 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13249 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
13250 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13251 // CHECK8:       omp.inner.for.cond:
13252 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
13253 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !31
13254 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
13255 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13256 // CHECK8:       omp.inner.for.body:
13257 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
13258 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
13259 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13260 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !31
13261 // CHECK8-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !31
13262 // CHECK8-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
13263 // CHECK8-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
13264 // CHECK8-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
13265 // CHECK8-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !31
13266 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13267 // CHECK8:       omp.body.continue:
13268 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13269 // CHECK8:       omp.inner.for.inc:
13270 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
13271 // CHECK8-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
13272 // CHECK8-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
13273 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
13274 // CHECK8:       omp.inner.for.end:
13275 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13276 // CHECK8:       omp.loop.exit:
13277 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
13278 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13279 // CHECK8-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
13280 // CHECK8-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13281 // CHECK8:       .omp.final.then:
13282 // CHECK8-NEXT:    store i32 10, i32* [[I]], align 4
13283 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13284 // CHECK8:       .omp.final.done:
13285 // CHECK8-NEXT:    ret void
13286 //
13287 //
13288 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
13289 // CHECK8-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR2]] {
13290 // CHECK8-NEXT:  entry:
13291 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
13292 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
13293 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
13294 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
13295 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
13296 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
13297 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
13298 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
13299 // CHECK8-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
13300 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
13301 // CHECK8-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
13302 // CHECK8-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
13303 // CHECK8-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
13304 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
13305 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
13306 // CHECK8-NEXT:    ret void
13307 //
13308 //
13309 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..6
13310 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3]] {
13311 // CHECK8-NEXT:  entry:
13312 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13313 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13314 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
13315 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
13316 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13317 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13318 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13319 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13320 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13321 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13322 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
13323 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13324 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13325 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
13326 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
13327 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
13328 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13329 // CHECK8-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
13330 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13331 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13332 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13333 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
13334 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13335 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13336 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
13337 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13338 // CHECK8:       cond.true:
13339 // CHECK8-NEXT:    br label [[COND_END:%.*]]
13340 // CHECK8:       cond.false:
13341 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13342 // CHECK8-NEXT:    br label [[COND_END]]
13343 // CHECK8:       cond.end:
13344 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
13345 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
13346 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13347 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
13348 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13349 // CHECK8:       omp.inner.for.cond:
13350 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
13351 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !34
13352 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
13353 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13354 // CHECK8:       omp.inner.for.body:
13355 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
13356 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
13357 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13358 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !34
13359 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !34
13360 // CHECK8-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
13361 // CHECK8-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !34
13362 // CHECK8-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !34
13363 // CHECK8-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP9]] to i32
13364 // CHECK8-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
13365 // CHECK8-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
13366 // CHECK8-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !34
13367 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13368 // CHECK8:       omp.body.continue:
13369 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13370 // CHECK8:       omp.inner.for.inc:
13371 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
13372 // CHECK8-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1
13373 // CHECK8-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
13374 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
13375 // CHECK8:       omp.inner.for.end:
13376 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13377 // CHECK8:       omp.loop.exit:
13378 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
13379 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13380 // CHECK8-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
13381 // CHECK8-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13382 // CHECK8:       .omp.final.then:
13383 // CHECK8-NEXT:    store i32 10, i32* [[I]], align 4
13384 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13385 // CHECK8:       .omp.final.done:
13386 // CHECK8-NEXT:    ret void
13387 //
13388 //
13389 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
13390 // CHECK8-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR2]] {
13391 // CHECK8-NEXT:  entry:
13392 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
13393 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
13394 // CHECK8-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
13395 // CHECK8-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
13396 // CHECK8-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
13397 // CHECK8-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
13398 // CHECK8-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
13399 // CHECK8-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
13400 // CHECK8-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
13401 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
13402 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
13403 // CHECK8-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
13404 // CHECK8-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
13405 // CHECK8-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
13406 // CHECK8-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
13407 // CHECK8-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
13408 // CHECK8-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
13409 // CHECK8-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
13410 // CHECK8-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
13411 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
13412 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
13413 // CHECK8-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
13414 // CHECK8-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
13415 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
13416 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
13417 // CHECK8-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
13418 // CHECK8-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
13419 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
13420 // CHECK8-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
13421 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
13422 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
13423 // CHECK8-NEXT:    ret void
13424 //
13425 //
13426 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..9
13427 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR3]] {
13428 // CHECK8-NEXT:  entry:
13429 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13430 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13431 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
13432 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
13433 // CHECK8-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
13434 // CHECK8-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
13435 // CHECK8-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
13436 // CHECK8-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
13437 // CHECK8-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
13438 // CHECK8-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
13439 // CHECK8-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
13440 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13441 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13442 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13443 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13444 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13445 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13446 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
13447 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13448 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13449 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
13450 // CHECK8-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
13451 // CHECK8-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
13452 // CHECK8-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
13453 // CHECK8-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
13454 // CHECK8-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
13455 // CHECK8-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
13456 // CHECK8-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
13457 // CHECK8-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
13458 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
13459 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
13460 // CHECK8-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
13461 // CHECK8-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
13462 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
13463 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
13464 // CHECK8-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
13465 // CHECK8-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
13466 // CHECK8-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 0
13467 // CHECK8-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
13468 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13469 // CHECK8-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
13470 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13471 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13472 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13473 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
13474 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13475 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13476 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
13477 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13478 // CHECK8:       cond.true:
13479 // CHECK8-NEXT:    br label [[COND_END:%.*]]
13480 // CHECK8:       cond.false:
13481 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13482 // CHECK8-NEXT:    br label [[COND_END]]
13483 // CHECK8:       cond.end:
13484 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
13485 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
13486 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13487 // CHECK8-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
13488 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13489 // CHECK8:       omp.inner.for.cond:
13490 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
13491 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !37
13492 // CHECK8-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
13493 // CHECK8-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13494 // CHECK8:       omp.inner.for.body:
13495 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
13496 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
13497 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13498 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !37
13499 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !37
13500 // CHECK8-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
13501 // CHECK8-NEXT:    store i32 [[ADD6]], i32* [[A_ADDR]], align 4, !llvm.access.group !37
13502 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
13503 // CHECK8-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !37
13504 // CHECK8-NEXT:    [[CONV:%.*]] = fpext float [[TMP17]] to double
13505 // CHECK8-NEXT:    [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00
13506 // CHECK8-NEXT:    [[CONV8:%.*]] = fptrunc double [[ADD7]] to float
13507 // CHECK8-NEXT:    store float [[CONV8]], float* [[ARRAYIDX]], align 4, !llvm.access.group !37
13508 // CHECK8-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
13509 // CHECK8-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX9]], align 4, !llvm.access.group !37
13510 // CHECK8-NEXT:    [[CONV10:%.*]] = fpext float [[TMP18]] to double
13511 // CHECK8-NEXT:    [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00
13512 // CHECK8-NEXT:    [[CONV12:%.*]] = fptrunc double [[ADD11]] to float
13513 // CHECK8-NEXT:    store float [[CONV12]], float* [[ARRAYIDX9]], align 4, !llvm.access.group !37
13514 // CHECK8-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
13515 // CHECK8-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX13]], i32 0, i32 2
13516 // CHECK8-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX14]], align 8, !llvm.access.group !37
13517 // CHECK8-NEXT:    [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00
13518 // CHECK8-NEXT:    store double [[ADD15]], double* [[ARRAYIDX14]], align 8, !llvm.access.group !37
13519 // CHECK8-NEXT:    [[TMP20:%.*]] = mul nsw i32 1, [[TMP5]]
13520 // CHECK8-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP20]]
13521 // CHECK8-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX16]], i32 3
13522 // CHECK8-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !37
13523 // CHECK8-NEXT:    [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00
13524 // CHECK8-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !37
13525 // CHECK8-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
13526 // CHECK8-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !37
13527 // CHECK8-NEXT:    [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1
13528 // CHECK8-NEXT:    store i64 [[ADD19]], i64* [[X]], align 4, !llvm.access.group !37
13529 // CHECK8-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
13530 // CHECK8-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !37
13531 // CHECK8-NEXT:    [[CONV20:%.*]] = sext i8 [[TMP23]] to i32
13532 // CHECK8-NEXT:    [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1
13533 // CHECK8-NEXT:    [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8
13534 // CHECK8-NEXT:    store i8 [[CONV22]], i8* [[Y]], align 4, !llvm.access.group !37
13535 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13536 // CHECK8:       omp.body.continue:
13537 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13538 // CHECK8:       omp.inner.for.inc:
13539 // CHECK8-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
13540 // CHECK8-NEXT:    [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1
13541 // CHECK8-NEXT:    store i32 [[ADD23]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
13542 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
13543 // CHECK8:       omp.inner.for.end:
13544 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13545 // CHECK8:       omp.loop.exit:
13546 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
13547 // CHECK8-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13548 // CHECK8-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
13549 // CHECK8-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13550 // CHECK8:       .omp.final.then:
13551 // CHECK8-NEXT:    store i32 10, i32* [[I]], align 4
13552 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13553 // CHECK8:       .omp.final.done:
13554 // CHECK8-NEXT:    ret void
13555 //
13556 //
13557 // CHECK8-LABEL: define {{[^@]+}}@_Z3bari
13558 // CHECK8-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
13559 // CHECK8-NEXT:  entry:
13560 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13561 // CHECK8-NEXT:    [[A:%.*]] = alloca i32, align 4
13562 // CHECK8-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
13563 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13564 // CHECK8-NEXT:    store i32 0, i32* [[A]], align 4
13565 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
13566 // CHECK8-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
13567 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
13568 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
13569 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
13570 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
13571 // CHECK8-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef [[TMP2]])
13572 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
13573 // CHECK8-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
13574 // CHECK8-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
13575 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
13576 // CHECK8-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
13577 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
13578 // CHECK8-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
13579 // CHECK8-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
13580 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
13581 // CHECK8-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
13582 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
13583 // CHECK8-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
13584 // CHECK8-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
13585 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
13586 // CHECK8-NEXT:    ret i32 [[TMP8]]
13587 //
13588 //
13589 // CHECK8-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
13590 // CHECK8-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
13591 // CHECK8-NEXT:  entry:
13592 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
13593 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13594 // CHECK8-NEXT:    [[B:%.*]] = alloca i32, align 4
13595 // CHECK8-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
13596 // CHECK8-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
13597 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
13598 // CHECK8-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
13599 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
13600 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 4
13601 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 4
13602 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 4
13603 // CHECK8-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 4
13604 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13605 // CHECK8-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
13606 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13607 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
13608 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
13609 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
13610 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
13611 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
13612 // CHECK8-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
13613 // CHECK8-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
13614 // CHECK8-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
13615 // CHECK8-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
13616 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
13617 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
13618 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60
13619 // CHECK8-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
13620 // CHECK8-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
13621 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B]], align 4
13622 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[B_CASTED]], align 4
13623 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4
13624 // CHECK8-NEXT:    [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
13625 // CHECK8-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
13626 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8*
13627 // CHECK8-NEXT:    [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL]] to i8
13628 // CHECK8-NEXT:    store i8 [[FROMBOOL2]], i8* [[CONV]], align 1
13629 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
13630 // CHECK8-NEXT:    [[TMP9:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
13631 // CHECK8-NEXT:    [[TOBOOL3:%.*]] = trunc i8 [[TMP9]] to i1
13632 // CHECK8-NEXT:    br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
13633 // CHECK8:       omp_if.then:
13634 // CHECK8-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
13635 // CHECK8-NEXT:    [[TMP10:%.*]] = mul nuw i32 2, [[TMP1]]
13636 // CHECK8-NEXT:    [[TMP11:%.*]] = mul nuw i32 [[TMP10]], 2
13637 // CHECK8-NEXT:    [[TMP12:%.*]] = sext i32 [[TMP11]] to i64
13638 // CHECK8-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13639 // CHECK8-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.S1**
13640 // CHECK8-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP14]], align 4
13641 // CHECK8-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13642 // CHECK8-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
13643 // CHECK8-NEXT:    store double* [[A]], double** [[TMP16]], align 4
13644 // CHECK8-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
13645 // CHECK8-NEXT:    store i64 8, i64* [[TMP17]], align 4
13646 // CHECK8-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
13647 // CHECK8-NEXT:    store i8* null, i8** [[TMP18]], align 4
13648 // CHECK8-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
13649 // CHECK8-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
13650 // CHECK8-NEXT:    store i32 [[TMP6]], i32* [[TMP20]], align 4
13651 // CHECK8-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
13652 // CHECK8-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32*
13653 // CHECK8-NEXT:    store i32 [[TMP6]], i32* [[TMP22]], align 4
13654 // CHECK8-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
13655 // CHECK8-NEXT:    store i64 4, i64* [[TMP23]], align 4
13656 // CHECK8-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
13657 // CHECK8-NEXT:    store i8* null, i8** [[TMP24]], align 4
13658 // CHECK8-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
13659 // CHECK8-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32*
13660 // CHECK8-NEXT:    store i32 2, i32* [[TMP26]], align 4
13661 // CHECK8-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
13662 // CHECK8-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
13663 // CHECK8-NEXT:    store i32 2, i32* [[TMP28]], align 4
13664 // CHECK8-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
13665 // CHECK8-NEXT:    store i64 4, i64* [[TMP29]], align 4
13666 // CHECK8-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
13667 // CHECK8-NEXT:    store i8* null, i8** [[TMP30]], align 4
13668 // CHECK8-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
13669 // CHECK8-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32*
13670 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP32]], align 4
13671 // CHECK8-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
13672 // CHECK8-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32*
13673 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP34]], align 4
13674 // CHECK8-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
13675 // CHECK8-NEXT:    store i64 4, i64* [[TMP35]], align 4
13676 // CHECK8-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
13677 // CHECK8-NEXT:    store i8* null, i8** [[TMP36]], align 4
13678 // CHECK8-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
13679 // CHECK8-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16**
13680 // CHECK8-NEXT:    store i16* [[VLA]], i16** [[TMP38]], align 4
13681 // CHECK8-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
13682 // CHECK8-NEXT:    [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16**
13683 // CHECK8-NEXT:    store i16* [[VLA]], i16** [[TMP40]], align 4
13684 // CHECK8-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
13685 // CHECK8-NEXT:    store i64 [[TMP12]], i64* [[TMP41]], align 4
13686 // CHECK8-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
13687 // CHECK8-NEXT:    store i8* null, i8** [[TMP42]], align 4
13688 // CHECK8-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
13689 // CHECK8-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
13690 // CHECK8-NEXT:    store i32 [[TMP8]], i32* [[TMP44]], align 4
13691 // CHECK8-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
13692 // CHECK8-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32*
13693 // CHECK8-NEXT:    store i32 [[TMP8]], i32* [[TMP46]], align 4
13694 // CHECK8-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
13695 // CHECK8-NEXT:    store i64 1, i64* [[TMP47]], align 4
13696 // CHECK8-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
13697 // CHECK8-NEXT:    store i8* null, i8** [[TMP48]], align 4
13698 // CHECK8-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13699 // CHECK8-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13700 // CHECK8-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
13701 // CHECK8-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
13702 // CHECK8-NEXT:    [[TMP52:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13703 // CHECK8-NEXT:    [[TMP53:%.*]] = icmp ne i32 [[TMP52]], 0
13704 // CHECK8-NEXT:    br i1 [[TMP53]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
13705 // CHECK8:       omp_offload.failed:
13706 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]]
13707 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT]]
13708 // CHECK8:       omp_offload.cont:
13709 // CHECK8-NEXT:    br label [[OMP_IF_END:%.*]]
13710 // CHECK8:       omp_if.else:
13711 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]]
13712 // CHECK8-NEXT:    br label [[OMP_IF_END]]
13713 // CHECK8:       omp_if.end:
13714 // CHECK8-NEXT:    [[TMP54:%.*]] = mul nsw i32 1, [[TMP1]]
13715 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP54]]
13716 // CHECK8-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
13717 // CHECK8-NEXT:    [[TMP55:%.*]] = load i16, i16* [[ARRAYIDX4]], align 2
13718 // CHECK8-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP55]] to i32
13719 // CHECK8-NEXT:    [[TMP56:%.*]] = load i32, i32* [[B]], align 4
13720 // CHECK8-NEXT:    [[ADD6:%.*]] = add nsw i32 [[CONV5]], [[TMP56]]
13721 // CHECK8-NEXT:    [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
13722 // CHECK8-NEXT:    call void @llvm.stackrestore(i8* [[TMP57]])
13723 // CHECK8-NEXT:    ret i32 [[ADD6]]
13724 //
13725 //
13726 // CHECK8-LABEL: define {{[^@]+}}@_ZL7fstatici
13727 // CHECK8-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
13728 // CHECK8-NEXT:  entry:
13729 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13730 // CHECK8-NEXT:    [[A:%.*]] = alloca i32, align 4
13731 // CHECK8-NEXT:    [[AA:%.*]] = alloca i16, align 2
13732 // CHECK8-NEXT:    [[AAA:%.*]] = alloca i8, align 1
13733 // CHECK8-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
13734 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
13735 // CHECK8-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
13736 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
13737 // CHECK8-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
13738 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
13739 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
13740 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
13741 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13742 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13743 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13744 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
13745 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13746 // CHECK8-NEXT:    store i32 0, i32* [[A]], align 4
13747 // CHECK8-NEXT:    store i16 0, i16* [[AA]], align 2
13748 // CHECK8-NEXT:    store i8 0, i8* [[AAA]], align 1
13749 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
13750 // CHECK8-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
13751 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
13752 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
13753 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[N_CASTED]], align 4
13754 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
13755 // CHECK8-NEXT:    [[TMP4:%.*]] = load i16, i16* [[AA]], align 2
13756 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
13757 // CHECK8-NEXT:    store i16 [[TMP4]], i16* [[CONV]], align 2
13758 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[AA_CASTED]], align 4
13759 // CHECK8-NEXT:    [[TMP6:%.*]] = load i8, i8* [[AAA]], align 1
13760 // CHECK8-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
13761 // CHECK8-NEXT:    store i8 [[TMP6]], i8* [[CONV1]], align 1
13762 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
13763 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[N_ADDR]], align 4
13764 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 50
13765 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
13766 // CHECK8:       omp_if.then:
13767 // CHECK8-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13768 // CHECK8-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
13769 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP10]], align 4
13770 // CHECK8-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13771 // CHECK8-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32*
13772 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP12]], align 4
13773 // CHECK8-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
13774 // CHECK8-NEXT:    store i8* null, i8** [[TMP13]], align 4
13775 // CHECK8-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
13776 // CHECK8-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
13777 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP15]], align 4
13778 // CHECK8-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
13779 // CHECK8-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
13780 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP17]], align 4
13781 // CHECK8-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
13782 // CHECK8-NEXT:    store i8* null, i8** [[TMP18]], align 4
13783 // CHECK8-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
13784 // CHECK8-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
13785 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[TMP20]], align 4
13786 // CHECK8-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
13787 // CHECK8-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32*
13788 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[TMP22]], align 4
13789 // CHECK8-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
13790 // CHECK8-NEXT:    store i8* null, i8** [[TMP23]], align 4
13791 // CHECK8-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
13792 // CHECK8-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
13793 // CHECK8-NEXT:    store i32 [[TMP7]], i32* [[TMP25]], align 4
13794 // CHECK8-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
13795 // CHECK8-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
13796 // CHECK8-NEXT:    store i32 [[TMP7]], i32* [[TMP27]], align 4
13797 // CHECK8-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
13798 // CHECK8-NEXT:    store i8* null, i8** [[TMP28]], align 4
13799 // CHECK8-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
13800 // CHECK8-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [10 x i32]**
13801 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP30]], align 4
13802 // CHECK8-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
13803 // CHECK8-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to [10 x i32]**
13804 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP32]], align 4
13805 // CHECK8-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
13806 // CHECK8-NEXT:    store i8* null, i8** [[TMP33]], align 4
13807 // CHECK8-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13808 // CHECK8-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13809 // CHECK8-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A]], align 4
13810 // CHECK8-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_]], align 4
13811 // CHECK8-NEXT:    [[TMP37:%.*]] = load i32, i32* [[N_ADDR]], align 4
13812 // CHECK8-NEXT:    store i32 [[TMP37]], i32* [[DOTCAPTURE_EXPR_2]], align 4
13813 // CHECK8-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13814 // CHECK8-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13815 // CHECK8-NEXT:    [[SUB:%.*]] = sub i32 [[TMP38]], [[TMP39]]
13816 // CHECK8-NEXT:    [[SUB4:%.*]] = sub i32 [[SUB]], 1
13817 // CHECK8-NEXT:    [[ADD:%.*]] = add i32 [[SUB4]], 1
13818 // CHECK8-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
13819 // CHECK8-NEXT:    [[SUB5:%.*]] = sub i32 [[DIV]], 1
13820 // CHECK8-NEXT:    store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_3]], align 4
13821 // CHECK8-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
13822 // CHECK8-NEXT:    [[ADD6:%.*]] = add i32 [[TMP40]], 1
13823 // CHECK8-NEXT:    [[TMP41:%.*]] = zext i32 [[ADD6]] to i64
13824 // CHECK8-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]])
13825 // CHECK8-NEXT:    [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13826 // CHECK8-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
13827 // CHECK8-NEXT:    br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
13828 // CHECK8:       omp_offload.failed:
13829 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], i32 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
13830 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT]]
13831 // CHECK8:       omp_offload.cont:
13832 // CHECK8-NEXT:    br label [[OMP_IF_END:%.*]]
13833 // CHECK8:       omp_if.else:
13834 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], i32 [[TMP7]], [10 x i32]* [[B]]) #[[ATTR4]]
13835 // CHECK8-NEXT:    br label [[OMP_IF_END]]
13836 // CHECK8:       omp_if.end:
13837 // CHECK8-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
13838 // CHECK8-NEXT:    ret i32 [[TMP44]]
13839 //
13840 //
13841 // CHECK8-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
13842 // CHECK8-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
13843 // CHECK8-NEXT:  entry:
13844 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13845 // CHECK8-NEXT:    [[A:%.*]] = alloca i32, align 4
13846 // CHECK8-NEXT:    [[AA:%.*]] = alloca i16, align 2
13847 // CHECK8-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
13848 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
13849 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
13850 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
13851 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
13852 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
13853 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13854 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13855 // CHECK8-NEXT:    store i32 0, i32* [[A]], align 4
13856 // CHECK8-NEXT:    store i16 0, i16* [[AA]], align 2
13857 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
13858 // CHECK8-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
13859 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
13860 // CHECK8-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
13861 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
13862 // CHECK8-NEXT:    store i16 [[TMP2]], i16* [[CONV]], align 2
13863 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
13864 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
13865 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
13866 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
13867 // CHECK8:       omp_if.then:
13868 // CHECK8-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13869 // CHECK8-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
13870 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
13871 // CHECK8-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13872 // CHECK8-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
13873 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
13874 // CHECK8-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
13875 // CHECK8-NEXT:    store i8* null, i8** [[TMP9]], align 4
13876 // CHECK8-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
13877 // CHECK8-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
13878 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP11]], align 4
13879 // CHECK8-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
13880 // CHECK8-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
13881 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
13882 // CHECK8-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
13883 // CHECK8-NEXT:    store i8* null, i8** [[TMP14]], align 4
13884 // CHECK8-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
13885 // CHECK8-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
13886 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
13887 // CHECK8-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
13888 // CHECK8-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
13889 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
13890 // CHECK8-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
13891 // CHECK8-NEXT:    store i8* null, i8** [[TMP19]], align 4
13892 // CHECK8-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13893 // CHECK8-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13894 // CHECK8-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10)
13895 // CHECK8-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
13896 // CHECK8-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
13897 // CHECK8-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
13898 // CHECK8:       omp_offload.failed:
13899 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
13900 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT]]
13901 // CHECK8:       omp_offload.cont:
13902 // CHECK8-NEXT:    br label [[OMP_IF_END:%.*]]
13903 // CHECK8:       omp_if.else:
13904 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
13905 // CHECK8-NEXT:    br label [[OMP_IF_END]]
13906 // CHECK8:       omp_if.end:
13907 // CHECK8-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
13908 // CHECK8-NEXT:    ret i32 [[TMP24]]
13909 //
13910 //
13911 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
13912 // CHECK8-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
13913 // CHECK8-NEXT:  entry:
13914 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
13915 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
13916 // CHECK8-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
13917 // CHECK8-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
13918 // CHECK8-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
13919 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
13920 // CHECK8-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
13921 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
13922 // CHECK8-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
13923 // CHECK8-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
13924 // CHECK8-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
13925 // CHECK8-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
13926 // CHECK8-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
13927 // CHECK8-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
13928 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
13929 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
13930 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
13931 // CHECK8-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
13932 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
13933 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
13934 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
13935 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
13936 // CHECK8-NEXT:    [[TMP6:%.*]] = load i8, i8* [[CONV]], align 1
13937 // CHECK8-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
13938 // CHECK8-NEXT:    [[CONV3:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8*
13939 // CHECK8-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
13940 // CHECK8-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV3]], align 1
13941 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
13942 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]], i32 [[TMP7]])
13943 // CHECK8-NEXT:    ret void
13944 //
13945 //
13946 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..11
13947 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
13948 // CHECK8-NEXT:  entry:
13949 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13950 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13951 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
13952 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
13953 // CHECK8-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
13954 // CHECK8-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
13955 // CHECK8-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
13956 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
13957 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13958 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13959 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13960 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13961 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13962 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13963 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
13964 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13965 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13966 // CHECK8-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
13967 // CHECK8-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
13968 // CHECK8-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
13969 // CHECK8-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
13970 // CHECK8-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
13971 // CHECK8-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
13972 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
13973 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
13974 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
13975 // CHECK8-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
13976 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
13977 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13978 // CHECK8-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
13979 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13980 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13981 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13982 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
13983 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13984 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13985 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
13986 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13987 // CHECK8:       cond.true:
13988 // CHECK8-NEXT:    br label [[COND_END:%.*]]
13989 // CHECK8:       cond.false:
13990 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13991 // CHECK8-NEXT:    br label [[COND_END]]
13992 // CHECK8:       cond.end:
13993 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
13994 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
13995 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13996 // CHECK8-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
13997 // CHECK8-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV]], align 1
13998 // CHECK8-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1
13999 // CHECK8-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
14000 // CHECK8:       omp_if.then:
14001 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14002 // CHECK8:       omp.inner.for.cond:
14003 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
14004 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !40
14005 // CHECK8-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
14006 // CHECK8-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14007 // CHECK8:       omp.inner.for.body:
14008 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
14009 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
14010 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14011 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !40
14012 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !40
14013 // CHECK8-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP13]] to double
14014 // CHECK8-NEXT:    [[ADD5:%.*]] = fadd double [[CONV4]], 1.500000e+00
14015 // CHECK8-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
14016 // CHECK8-NEXT:    store double [[ADD5]], double* [[A]], align 4, !llvm.access.group !40
14017 // CHECK8-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
14018 // CHECK8-NEXT:    [[TMP14:%.*]] = load double, double* [[A6]], align 4, !llvm.access.group !40
14019 // CHECK8-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
14020 // CHECK8-NEXT:    store double [[INC]], double* [[A6]], align 4, !llvm.access.group !40
14021 // CHECK8-NEXT:    [[CONV7:%.*]] = fptosi double [[INC]] to i16
14022 // CHECK8-NEXT:    [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]]
14023 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP15]]
14024 // CHECK8-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
14025 // CHECK8-NEXT:    store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !40
14026 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14027 // CHECK8:       omp.body.continue:
14028 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14029 // CHECK8:       omp.inner.for.inc:
14030 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
14031 // CHECK8-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP16]], 1
14032 // CHECK8-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
14033 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
14034 // CHECK8:       omp.inner.for.end:
14035 // CHECK8-NEXT:    br label [[OMP_IF_END:%.*]]
14036 // CHECK8:       omp_if.else:
14037 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
14038 // CHECK8:       omp.inner.for.cond10:
14039 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14040 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14041 // CHECK8-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
14042 // CHECK8-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END26:%.*]]
14043 // CHECK8:       omp.inner.for.body12:
14044 // CHECK8-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14045 // CHECK8-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP19]], 1
14046 // CHECK8-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
14047 // CHECK8-NEXT:    store i32 [[ADD14]], i32* [[I]], align 4
14048 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32, i32* [[B_ADDR]], align 4
14049 // CHECK8-NEXT:    [[CONV15:%.*]] = sitofp i32 [[TMP20]] to double
14050 // CHECK8-NEXT:    [[ADD16:%.*]] = fadd double [[CONV15]], 1.500000e+00
14051 // CHECK8-NEXT:    [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
14052 // CHECK8-NEXT:    store double [[ADD16]], double* [[A17]], align 4
14053 // CHECK8-NEXT:    [[A18:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
14054 // CHECK8-NEXT:    [[TMP21:%.*]] = load double, double* [[A18]], align 4
14055 // CHECK8-NEXT:    [[INC19:%.*]] = fadd double [[TMP21]], 1.000000e+00
14056 // CHECK8-NEXT:    store double [[INC19]], double* [[A18]], align 4
14057 // CHECK8-NEXT:    [[CONV20:%.*]] = fptosi double [[INC19]] to i16
14058 // CHECK8-NEXT:    [[TMP22:%.*]] = mul nsw i32 1, [[TMP2]]
14059 // CHECK8-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP22]]
14060 // CHECK8-NEXT:    [[ARRAYIDX22:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX21]], i32 1
14061 // CHECK8-NEXT:    store i16 [[CONV20]], i16* [[ARRAYIDX22]], align 2
14062 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE23:%.*]]
14063 // CHECK8:       omp.body.continue23:
14064 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC24:%.*]]
14065 // CHECK8:       omp.inner.for.inc24:
14066 // CHECK8-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
14067 // CHECK8-NEXT:    [[ADD25:%.*]] = add nsw i32 [[TMP23]], 1
14068 // CHECK8-NEXT:    store i32 [[ADD25]], i32* [[DOTOMP_IV]], align 4
14069 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP43:![0-9]+]]
14070 // CHECK8:       omp.inner.for.end26:
14071 // CHECK8-NEXT:    br label [[OMP_IF_END]]
14072 // CHECK8:       omp_if.end:
14073 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14074 // CHECK8:       omp.loop.exit:
14075 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
14076 // CHECK8-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14077 // CHECK8-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
14078 // CHECK8-NEXT:    br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14079 // CHECK8:       .omp.final.then:
14080 // CHECK8-NEXT:    store i32 10, i32* [[I]], align 4
14081 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14082 // CHECK8:       .omp.final.done:
14083 // CHECK8-NEXT:    ret void
14084 //
14085 //
14086 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
14087 // CHECK8-SAME: (i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
14088 // CHECK8-NEXT:  entry:
14089 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
14090 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14091 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
14092 // CHECK8-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
14093 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
14094 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
14095 // CHECK8-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
14096 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
14097 // CHECK8-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
14098 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
14099 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14100 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
14101 // CHECK8-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
14102 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
14103 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
14104 // CHECK8-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
14105 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
14106 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
14107 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
14108 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
14109 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
14110 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
14111 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
14112 // CHECK8-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV]], align 2
14113 // CHECK8-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
14114 // CHECK8-NEXT:    store i16 [[TMP5]], i16* [[CONV2]], align 2
14115 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AA_CASTED]], align 4
14116 // CHECK8-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV1]], align 1
14117 // CHECK8-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
14118 // CHECK8-NEXT:    store i8 [[TMP7]], i8* [[CONV3]], align 1
14119 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
14120 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]])
14121 // CHECK8-NEXT:    ret void
14122 //
14123 //
14124 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..13
14125 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
14126 // CHECK8-NEXT:  entry:
14127 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14128 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14129 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
14130 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14131 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
14132 // CHECK8-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
14133 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
14134 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14135 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14136 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14137 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14138 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
14139 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
14140 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14141 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14142 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14143 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14144 // CHECK8-NEXT:    [[I6:%.*]] = alloca i32, align 4
14145 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14146 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14147 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
14148 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14149 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
14150 // CHECK8-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
14151 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
14152 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
14153 // CHECK8-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
14154 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
14155 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
14156 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
14157 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
14158 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
14159 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14160 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14161 // CHECK8-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
14162 // CHECK8-NEXT:    [[SUB4:%.*]] = sub i32 [[SUB]], 1
14163 // CHECK8-NEXT:    [[ADD:%.*]] = add i32 [[SUB4]], 1
14164 // CHECK8-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
14165 // CHECK8-NEXT:    [[SUB5:%.*]] = sub i32 [[DIV]], 1
14166 // CHECK8-NEXT:    store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_3]], align 4
14167 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14168 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
14169 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14170 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14171 // CHECK8-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
14172 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14173 // CHECK8:       omp.precond.then:
14174 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14175 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
14176 // CHECK8-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
14177 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14178 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14179 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14180 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
14181 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14182 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14183 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
14184 // CHECK8-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
14185 // CHECK8-NEXT:    br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14186 // CHECK8:       cond.true:
14187 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
14188 // CHECK8-NEXT:    br label [[COND_END:%.*]]
14189 // CHECK8:       cond.false:
14190 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14191 // CHECK8-NEXT:    br label [[COND_END]]
14192 // CHECK8:       cond.end:
14193 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
14194 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
14195 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14196 // CHECK8-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
14197 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14198 // CHECK8:       omp.inner.for.cond:
14199 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
14200 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !45
14201 // CHECK8-NEXT:    [[ADD8:%.*]] = add i32 [[TMP17]], 1
14202 // CHECK8-NEXT:    [[CMP9:%.*]] = icmp ult i32 [[TMP16]], [[ADD8]]
14203 // CHECK8-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14204 // CHECK8:       omp.inner.for.body:
14205 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !45
14206 // CHECK8-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
14207 // CHECK8-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
14208 // CHECK8-NEXT:    [[ADD10:%.*]] = add i32 [[TMP18]], [[MUL]]
14209 // CHECK8-NEXT:    store i32 [[ADD10]], i32* [[I6]], align 4, !llvm.access.group !45
14210 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !45
14211 // CHECK8-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP20]], 1
14212 // CHECK8-NEXT:    store i32 [[ADD11]], i32* [[A_ADDR]], align 4, !llvm.access.group !45
14213 // CHECK8-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !45
14214 // CHECK8-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP21]] to i32
14215 // CHECK8-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
14216 // CHECK8-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
14217 // CHECK8-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !45
14218 // CHECK8-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV1]], align 1, !llvm.access.group !45
14219 // CHECK8-NEXT:    [[CONV15:%.*]] = sext i8 [[TMP22]] to i32
14220 // CHECK8-NEXT:    [[ADD16:%.*]] = add nsw i32 [[CONV15]], 1
14221 // CHECK8-NEXT:    [[CONV17:%.*]] = trunc i32 [[ADD16]] to i8
14222 // CHECK8-NEXT:    store i8 [[CONV17]], i8* [[CONV1]], align 1, !llvm.access.group !45
14223 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
14224 // CHECK8-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !45
14225 // CHECK8-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP23]], 1
14226 // CHECK8-NEXT:    store i32 [[ADD18]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !45
14227 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14228 // CHECK8:       omp.body.continue:
14229 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14230 // CHECK8:       omp.inner.for.inc:
14231 // CHECK8-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
14232 // CHECK8-NEXT:    [[ADD19:%.*]] = add i32 [[TMP24]], 1
14233 // CHECK8-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
14234 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
14235 // CHECK8:       omp.inner.for.end:
14236 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14237 // CHECK8:       omp.loop.exit:
14238 // CHECK8-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14239 // CHECK8-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
14240 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
14241 // CHECK8-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14242 // CHECK8-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
14243 // CHECK8-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14244 // CHECK8:       .omp.final.then:
14245 // CHECK8-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14246 // CHECK8-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14247 // CHECK8-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14248 // CHECK8-NEXT:    [[SUB20:%.*]] = sub i32 [[TMP30]], [[TMP31]]
14249 // CHECK8-NEXT:    [[SUB21:%.*]] = sub i32 [[SUB20]], 1
14250 // CHECK8-NEXT:    [[ADD22:%.*]] = add i32 [[SUB21]], 1
14251 // CHECK8-NEXT:    [[DIV23:%.*]] = udiv i32 [[ADD22]], 1
14252 // CHECK8-NEXT:    [[MUL24:%.*]] = mul i32 [[DIV23]], 1
14253 // CHECK8-NEXT:    [[ADD25:%.*]] = add i32 [[TMP29]], [[MUL24]]
14254 // CHECK8-NEXT:    store i32 [[ADD25]], i32* [[I6]], align 4
14255 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14256 // CHECK8:       .omp.final.done:
14257 // CHECK8-NEXT:    br label [[OMP_PRECOND_END]]
14258 // CHECK8:       omp.precond.end:
14259 // CHECK8-NEXT:    ret void
14260 //
14261 //
14262 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
14263 // CHECK8-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
14264 // CHECK8-NEXT:  entry:
14265 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
14266 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
14267 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
14268 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
14269 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
14270 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
14271 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
14272 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
14273 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
14274 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
14275 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
14276 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
14277 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
14278 // CHECK8-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
14279 // CHECK8-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
14280 // CHECK8-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
14281 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
14282 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
14283 // CHECK8-NEXT:    ret void
14284 //
14285 //
14286 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..16
14287 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
14288 // CHECK8-NEXT:  entry:
14289 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
14290 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
14291 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
14292 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
14293 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
14294 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14295 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14296 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14297 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14298 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14299 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14300 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
14301 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
14302 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
14303 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
14304 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
14305 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
14306 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
14307 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
14308 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14309 // CHECK8-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
14310 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
14311 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
14312 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
14313 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
14314 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
14315 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14316 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
14317 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14318 // CHECK8:       cond.true:
14319 // CHECK8-NEXT:    br label [[COND_END:%.*]]
14320 // CHECK8:       cond.false:
14321 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
14322 // CHECK8-NEXT:    br label [[COND_END]]
14323 // CHECK8:       cond.end:
14324 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
14325 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
14326 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14327 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
14328 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14329 // CHECK8:       omp.inner.for.cond:
14330 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
14331 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !48
14332 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
14333 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14334 // CHECK8:       omp.inner.for.body:
14335 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
14336 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
14337 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14338 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !48
14339 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !48
14340 // CHECK8-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
14341 // CHECK8-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !48
14342 // CHECK8-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !48
14343 // CHECK8-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
14344 // CHECK8-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
14345 // CHECK8-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
14346 // CHECK8-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !48
14347 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
14348 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !48
14349 // CHECK8-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
14350 // CHECK8-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !48
14351 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14352 // CHECK8:       omp.body.continue:
14353 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14354 // CHECK8:       omp.inner.for.inc:
14355 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
14356 // CHECK8-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP12]], 1
14357 // CHECK8-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
14358 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP49:![0-9]+]]
14359 // CHECK8:       omp.inner.for.end:
14360 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
14361 // CHECK8:       omp.loop.exit:
14362 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
14363 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14364 // CHECK8-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
14365 // CHECK8-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14366 // CHECK8:       .omp.final.then:
14367 // CHECK8-NEXT:    store i32 10, i32* [[I]], align 4
14368 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14369 // CHECK8:       .omp.final.done:
14370 // CHECK8-NEXT:    ret void
14371 //
14372 //
14373 // CHECK8-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
14374 // CHECK8-SAME: () #[[ATTR5]] {
14375 // CHECK8-NEXT:  entry:
14376 // CHECK8-NEXT:    call void @__tgt_register_requires(i64 1)
14377 // CHECK8-NEXT:    ret void
14378 //
14379 //
14380 // CHECK9-LABEL: define {{[^@]+}}@_Z3fooi
14381 // CHECK9-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
14382 // CHECK9-NEXT:  entry:
14383 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14384 // CHECK9-NEXT:    [[A:%.*]] = alloca i32, align 4
14385 // CHECK9-NEXT:    [[AA:%.*]] = alloca i16, align 2
14386 // CHECK9-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
14387 // CHECK9-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
14388 // CHECK9-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
14389 // CHECK9-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
14390 // CHECK9-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
14391 // CHECK9-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
14392 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14393 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14394 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14395 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14396 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14397 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14398 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
14399 // CHECK9-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
14400 // CHECK9-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
14401 // CHECK9-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
14402 // CHECK9-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
14403 // CHECK9-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
14404 // CHECK9-NEXT:    [[A8:%.*]] = alloca i32, align 4
14405 // CHECK9-NEXT:    [[A9:%.*]] = alloca i32, align 4
14406 // CHECK9-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
14407 // CHECK9-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
14408 // CHECK9-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
14409 // CHECK9-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
14410 // CHECK9-NEXT:    [[I24:%.*]] = alloca i32, align 4
14411 // CHECK9-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
14412 // CHECK9-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
14413 // CHECK9-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
14414 // CHECK9-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
14415 // CHECK9-NEXT:    [[I40:%.*]] = alloca i32, align 4
14416 // CHECK9-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
14417 // CHECK9-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
14418 // CHECK9-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
14419 // CHECK9-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
14420 // CHECK9-NEXT:    [[I58:%.*]] = alloca i32, align 4
14421 // CHECK9-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14422 // CHECK9-NEXT:    store i32 0, i32* [[A]], align 4
14423 // CHECK9-NEXT:    store i16 0, i16* [[AA]], align 2
14424 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
14425 // CHECK9-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
14426 // CHECK9-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
14427 // CHECK9-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
14428 // CHECK9-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
14429 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
14430 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
14431 // CHECK9-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
14432 // CHECK9-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
14433 // CHECK9-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
14434 // CHECK9-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
14435 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[A]], align 4
14436 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
14437 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
14438 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_2]], align 4
14439 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14440 // CHECK9-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
14441 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14442 // CHECK9-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
14443 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14444 // CHECK9:       omp.inner.for.cond:
14445 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14446 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
14447 // CHECK9-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
14448 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14449 // CHECK9:       omp.inner.for.body:
14450 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14451 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
14452 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14453 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
14454 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14455 // CHECK9:       omp.body.continue:
14456 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14457 // CHECK9:       omp.inner.for.inc:
14458 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14459 // CHECK9-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
14460 // CHECK9-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14461 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
14462 // CHECK9:       omp.inner.for.end:
14463 // CHECK9-NEXT:    store i32 10, i32* [[I]], align 4
14464 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
14465 // CHECK9-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
14466 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
14467 // CHECK9-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV7]], align 4
14468 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[A]], align 4
14469 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTLINEAR_START]], align 4
14470 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
14471 // CHECK9:       omp.inner.for.cond10:
14472 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
14473 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
14474 // CHECK9-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
14475 // CHECK9-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
14476 // CHECK9:       omp.inner.for.body12:
14477 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
14478 // CHECK9-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 1
14479 // CHECK9-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
14480 // CHECK9-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4
14481 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A8]], align 4
14482 // CHECK9-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
14483 // CHECK9-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4
14484 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
14485 // CHECK9:       omp.body.continue16:
14486 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
14487 // CHECK9:       omp.inner.for.inc17:
14488 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
14489 // CHECK9-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
14490 // CHECK9-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
14491 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP7:![0-9]+]]
14492 // CHECK9:       omp.inner.for.end19:
14493 // CHECK9-NEXT:    store i32 10, i32* [[A]], align 4
14494 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
14495 // CHECK9-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
14496 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
14497 // CHECK9-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_IV23]], align 4
14498 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
14499 // CHECK9:       omp.inner.for.cond25:
14500 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
14501 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !9
14502 // CHECK9-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP21]], [[TMP22]]
14503 // CHECK9-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
14504 // CHECK9:       omp.inner.for.body27:
14505 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
14506 // CHECK9-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP23]], 1
14507 // CHECK9-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
14508 // CHECK9-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !9
14509 // CHECK9-NEXT:    [[TMP24:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
14510 // CHECK9-NEXT:    [[CONV:%.*]] = sext i16 [[TMP24]] to i32
14511 // CHECK9-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
14512 // CHECK9-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
14513 // CHECK9-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !9
14514 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
14515 // CHECK9:       omp.body.continue32:
14516 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
14517 // CHECK9:       omp.inner.for.inc33:
14518 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
14519 // CHECK9-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP25]], 1
14520 // CHECK9-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
14521 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP10:![0-9]+]]
14522 // CHECK9:       omp.inner.for.end35:
14523 // CHECK9-NEXT:    store i32 10, i32* [[I24]], align 4
14524 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
14525 // CHECK9-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
14526 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
14527 // CHECK9-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV39]], align 4
14528 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
14529 // CHECK9:       omp.inner.for.cond41:
14530 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
14531 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !12
14532 // CHECK9-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
14533 // CHECK9-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
14534 // CHECK9:       omp.inner.for.body43:
14535 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
14536 // CHECK9-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP29]], 1
14537 // CHECK9-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
14538 // CHECK9-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !12
14539 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
14540 // CHECK9-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP30]], 1
14541 // CHECK9-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !12
14542 // CHECK9-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
14543 // CHECK9-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP31]] to i32
14544 // CHECK9-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
14545 // CHECK9-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
14546 // CHECK9-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !12
14547 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
14548 // CHECK9:       omp.body.continue50:
14549 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
14550 // CHECK9:       omp.inner.for.inc51:
14551 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
14552 // CHECK9-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP32]], 1
14553 // CHECK9-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
14554 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP13:![0-9]+]]
14555 // CHECK9:       omp.inner.for.end53:
14556 // CHECK9-NEXT:    store i32 10, i32* [[I40]], align 4
14557 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
14558 // CHECK9-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
14559 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
14560 // CHECK9-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV57]], align 4
14561 // CHECK9-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 0
14562 // CHECK9-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
14563 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
14564 // CHECK9:       omp.inner.for.cond59:
14565 // CHECK9-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
14566 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !15
14567 // CHECK9-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP34]], [[TMP35]]
14568 // CHECK9-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
14569 // CHECK9:       omp.inner.for.body61:
14570 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
14571 // CHECK9-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP36]], 1
14572 // CHECK9-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
14573 // CHECK9-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !15
14574 // CHECK9-NEXT:    [[TMP37:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
14575 // CHECK9-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP37]], 1
14576 // CHECK9-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !15
14577 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
14578 // CHECK9-NEXT:    [[TMP38:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
14579 // CHECK9-NEXT:    [[CONV65:%.*]] = fpext float [[TMP38]] to double
14580 // CHECK9-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
14581 // CHECK9-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
14582 // CHECK9-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
14583 // CHECK9-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
14584 // CHECK9-NEXT:    [[TMP39:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !15
14585 // CHECK9-NEXT:    [[CONV69:%.*]] = fpext float [[TMP39]] to double
14586 // CHECK9-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
14587 // CHECK9-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
14588 // CHECK9-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !15
14589 // CHECK9-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
14590 // CHECK9-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i64 0, i64 2
14591 // CHECK9-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !15
14592 // CHECK9-NEXT:    [[ADD74:%.*]] = fadd double [[TMP40]], 1.000000e+00
14593 // CHECK9-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !15
14594 // CHECK9-NEXT:    [[TMP41:%.*]] = mul nsw i64 1, [[TMP4]]
14595 // CHECK9-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP41]]
14596 // CHECK9-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i64 3
14597 // CHECK9-NEXT:    [[TMP42:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !15
14598 // CHECK9-NEXT:    [[ADD77:%.*]] = fadd double [[TMP42]], 1.000000e+00
14599 // CHECK9-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !15
14600 // CHECK9-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
14601 // CHECK9-NEXT:    [[TMP43:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
14602 // CHECK9-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP43]], 1
14603 // CHECK9-NEXT:    store i64 [[ADD78]], i64* [[X]], align 8, !llvm.access.group !15
14604 // CHECK9-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
14605 // CHECK9-NEXT:    [[TMP44:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
14606 // CHECK9-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP44]] to i32
14607 // CHECK9-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
14608 // CHECK9-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
14609 // CHECK9-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 8, !llvm.access.group !15
14610 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
14611 // CHECK9:       omp.body.continue82:
14612 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
14613 // CHECK9:       omp.inner.for.inc83:
14614 // CHECK9-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
14615 // CHECK9-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP45]], 1
14616 // CHECK9-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
14617 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP16:![0-9]+]]
14618 // CHECK9:       omp.inner.for.end85:
14619 // CHECK9-NEXT:    store i32 10, i32* [[I58]], align 4
14620 // CHECK9-NEXT:    [[TMP46:%.*]] = load i32, i32* [[A]], align 4
14621 // CHECK9-NEXT:    [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
14622 // CHECK9-NEXT:    call void @llvm.stackrestore(i8* [[TMP47]])
14623 // CHECK9-NEXT:    ret i32 [[TMP46]]
14624 //
14625 //
14626 // CHECK9-LABEL: define {{[^@]+}}@_Z3bari
14627 // CHECK9-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
14628 // CHECK9-NEXT:  entry:
14629 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14630 // CHECK9-NEXT:    [[A:%.*]] = alloca i32, align 4
14631 // CHECK9-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
14632 // CHECK9-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14633 // CHECK9-NEXT:    store i32 0, i32* [[A]], align 4
14634 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
14635 // CHECK9-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
14636 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
14637 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
14638 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
14639 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
14640 // CHECK9-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef signext [[TMP2]])
14641 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
14642 // CHECK9-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
14643 // CHECK9-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
14644 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
14645 // CHECK9-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
14646 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
14647 // CHECK9-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
14648 // CHECK9-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
14649 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
14650 // CHECK9-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
14651 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
14652 // CHECK9-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
14653 // CHECK9-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
14654 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
14655 // CHECK9-NEXT:    ret i32 [[TMP8]]
14656 //
14657 //
14658 // CHECK9-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
14659 // CHECK9-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
14660 // CHECK9-NEXT:  entry:
14661 // CHECK9-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
14662 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14663 // CHECK9-NEXT:    [[B:%.*]] = alloca i32, align 4
14664 // CHECK9-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
14665 // CHECK9-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
14666 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14667 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14668 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14669 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14670 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
14671 // CHECK9-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
14672 // CHECK9-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14673 // CHECK9-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
14674 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
14675 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
14676 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
14677 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
14678 // CHECK9-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
14679 // CHECK9-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
14680 // CHECK9-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
14681 // CHECK9-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
14682 // CHECK9-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
14683 // CHECK9-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
14684 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14685 // CHECK9-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
14686 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14687 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
14688 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14689 // CHECK9:       omp.inner.for.cond:
14690 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
14691 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
14692 // CHECK9-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
14693 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14694 // CHECK9:       omp.inner.for.body:
14695 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
14696 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
14697 // CHECK9-NEXT:    [[ADD2:%.*]] = add nsw i32 0, [[MUL]]
14698 // CHECK9-NEXT:    store i32 [[ADD2]], i32* [[I]], align 4, !llvm.access.group !18
14699 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
14700 // CHECK9-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP9]] to double
14701 // CHECK9-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
14702 // CHECK9-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
14703 // CHECK9-NEXT:    store double [[ADD3]], double* [[A]], align 8, !llvm.access.group !18
14704 // CHECK9-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
14705 // CHECK9-NEXT:    [[TMP10:%.*]] = load double, double* [[A4]], align 8, !llvm.access.group !18
14706 // CHECK9-NEXT:    [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00
14707 // CHECK9-NEXT:    store double [[INC]], double* [[A4]], align 8, !llvm.access.group !18
14708 // CHECK9-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
14709 // CHECK9-NEXT:    [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]]
14710 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP11]]
14711 // CHECK9-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
14712 // CHECK9-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !18
14713 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14714 // CHECK9:       omp.body.continue:
14715 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14716 // CHECK9:       omp.inner.for.inc:
14717 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
14718 // CHECK9-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP12]], 1
14719 // CHECK9-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
14720 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
14721 // CHECK9:       omp.inner.for.end:
14722 // CHECK9-NEXT:    store i32 10, i32* [[I]], align 4
14723 // CHECK9-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
14724 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
14725 // CHECK9-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX8]], i64 1
14726 // CHECK9-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX9]], align 2
14727 // CHECK9-NEXT:    [[CONV10:%.*]] = sext i16 [[TMP14]] to i32
14728 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[B]], align 4
14729 // CHECK9-NEXT:    [[ADD11:%.*]] = add nsw i32 [[CONV10]], [[TMP15]]
14730 // CHECK9-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
14731 // CHECK9-NEXT:    call void @llvm.stackrestore(i8* [[TMP16]])
14732 // CHECK9-NEXT:    ret i32 [[ADD11]]
14733 //
14734 //
14735 // CHECK9-LABEL: define {{[^@]+}}@_ZL7fstatici
14736 // CHECK9-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
14737 // CHECK9-NEXT:  entry:
14738 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14739 // CHECK9-NEXT:    [[A:%.*]] = alloca i32, align 4
14740 // CHECK9-NEXT:    [[AA:%.*]] = alloca i16, align 2
14741 // CHECK9-NEXT:    [[AAA:%.*]] = alloca i8, align 1
14742 // CHECK9-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
14743 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14744 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14745 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14746 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14747 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14748 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14749 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
14750 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14751 // CHECK9-NEXT:    [[I5:%.*]] = alloca i32, align 4
14752 // CHECK9-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14753 // CHECK9-NEXT:    store i32 0, i32* [[A]], align 4
14754 // CHECK9-NEXT:    store i16 0, i16* [[AA]], align 2
14755 // CHECK9-NEXT:    store i8 0, i8* [[AAA]], align 1
14756 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
14757 // CHECK9-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
14758 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
14759 // CHECK9-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14760 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14761 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14762 // CHECK9-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
14763 // CHECK9-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
14764 // CHECK9-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
14765 // CHECK9-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
14766 // CHECK9-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
14767 // CHECK9-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
14768 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14769 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
14770 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
14771 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14772 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
14773 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14774 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14775 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
14776 // CHECK9-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
14777 // CHECK9:       simd.if.then:
14778 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14779 // CHECK9-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
14780 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14781 // CHECK9:       omp.inner.for.cond:
14782 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
14783 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
14784 // CHECK9-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
14785 // CHECK9-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
14786 // CHECK9-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14787 // CHECK9:       omp.inner.for.body:
14788 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !21
14789 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
14790 // CHECK9-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
14791 // CHECK9-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
14792 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !21
14793 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !21
14794 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
14795 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !21
14796 // CHECK9-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !21
14797 // CHECK9-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
14798 // CHECK9-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
14799 // CHECK9-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
14800 // CHECK9-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !21
14801 // CHECK9-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !21
14802 // CHECK9-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
14803 // CHECK9-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
14804 // CHECK9-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
14805 // CHECK9-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !21
14806 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
14807 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
14808 // CHECK9-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
14809 // CHECK9-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
14810 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14811 // CHECK9:       omp.body.continue:
14812 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14813 // CHECK9:       omp.inner.for.inc:
14814 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
14815 // CHECK9-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
14816 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
14817 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
14818 // CHECK9:       omp.inner.for.end:
14819 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14820 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14821 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14822 // CHECK9-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
14823 // CHECK9-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
14824 // CHECK9-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
14825 // CHECK9-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
14826 // CHECK9-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
14827 // CHECK9-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
14828 // CHECK9-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
14829 // CHECK9-NEXT:    br label [[SIMD_IF_END]]
14830 // CHECK9:       simd.if.end:
14831 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
14832 // CHECK9-NEXT:    ret i32 [[TMP21]]
14833 //
14834 //
14835 // CHECK9-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
14836 // CHECK9-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
14837 // CHECK9-NEXT:  entry:
14838 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14839 // CHECK9-NEXT:    [[A:%.*]] = alloca i32, align 4
14840 // CHECK9-NEXT:    [[AA:%.*]] = alloca i16, align 2
14841 // CHECK9-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
14842 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14843 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14844 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14845 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14846 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
14847 // CHECK9-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14848 // CHECK9-NEXT:    store i32 0, i32* [[A]], align 4
14849 // CHECK9-NEXT:    store i16 0, i16* [[AA]], align 2
14850 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14851 // CHECK9-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
14852 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14853 // CHECK9-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
14854 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14855 // CHECK9:       omp.inner.for.cond:
14856 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
14857 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
14858 // CHECK9-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
14859 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14860 // CHECK9:       omp.inner.for.body:
14861 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
14862 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
14863 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14864 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24
14865 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24
14866 // CHECK9-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
14867 // CHECK9-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !24
14868 // CHECK9-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24
14869 // CHECK9-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
14870 // CHECK9-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
14871 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
14872 // CHECK9-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !24
14873 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
14874 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
14875 // CHECK9-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
14876 // CHECK9-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
14877 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14878 // CHECK9:       omp.body.continue:
14879 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14880 // CHECK9:       omp.inner.for.inc:
14881 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
14882 // CHECK9-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
14883 // CHECK9-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
14884 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
14885 // CHECK9:       omp.inner.for.end:
14886 // CHECK9-NEXT:    store i32 10, i32* [[I]], align 4
14887 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
14888 // CHECK9-NEXT:    ret i32 [[TMP8]]
14889 //
14890 //
14891 // CHECK10-LABEL: define {{[^@]+}}@_Z3fooi
14892 // CHECK10-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
14893 // CHECK10-NEXT:  entry:
14894 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14895 // CHECK10-NEXT:    [[A:%.*]] = alloca i32, align 4
14896 // CHECK10-NEXT:    [[AA:%.*]] = alloca i16, align 2
14897 // CHECK10-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
14898 // CHECK10-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
14899 // CHECK10-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
14900 // CHECK10-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
14901 // CHECK10-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
14902 // CHECK10-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
14903 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14904 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14905 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14906 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14907 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14908 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14909 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
14910 // CHECK10-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
14911 // CHECK10-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
14912 // CHECK10-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
14913 // CHECK10-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
14914 // CHECK10-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
14915 // CHECK10-NEXT:    [[A8:%.*]] = alloca i32, align 4
14916 // CHECK10-NEXT:    [[A9:%.*]] = alloca i32, align 4
14917 // CHECK10-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
14918 // CHECK10-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
14919 // CHECK10-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
14920 // CHECK10-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
14921 // CHECK10-NEXT:    [[I24:%.*]] = alloca i32, align 4
14922 // CHECK10-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
14923 // CHECK10-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
14924 // CHECK10-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
14925 // CHECK10-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
14926 // CHECK10-NEXT:    [[I40:%.*]] = alloca i32, align 4
14927 // CHECK10-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
14928 // CHECK10-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
14929 // CHECK10-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
14930 // CHECK10-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
14931 // CHECK10-NEXT:    [[I58:%.*]] = alloca i32, align 4
14932 // CHECK10-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14933 // CHECK10-NEXT:    store i32 0, i32* [[A]], align 4
14934 // CHECK10-NEXT:    store i16 0, i16* [[AA]], align 2
14935 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
14936 // CHECK10-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
14937 // CHECK10-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
14938 // CHECK10-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
14939 // CHECK10-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
14940 // CHECK10-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
14941 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
14942 // CHECK10-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
14943 // CHECK10-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
14944 // CHECK10-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
14945 // CHECK10-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
14946 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[A]], align 4
14947 // CHECK10-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
14948 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
14949 // CHECK10-NEXT:    store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_2]], align 4
14950 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14951 // CHECK10-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
14952 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14953 // CHECK10-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
14954 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14955 // CHECK10:       omp.inner.for.cond:
14956 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14957 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
14958 // CHECK10-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
14959 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14960 // CHECK10:       omp.inner.for.body:
14961 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14962 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
14963 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14964 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
14965 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14966 // CHECK10:       omp.body.continue:
14967 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14968 // CHECK10:       omp.inner.for.inc:
14969 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14970 // CHECK10-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
14971 // CHECK10-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14972 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
14973 // CHECK10:       omp.inner.for.end:
14974 // CHECK10-NEXT:    store i32 10, i32* [[I]], align 4
14975 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
14976 // CHECK10-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
14977 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
14978 // CHECK10-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV7]], align 4
14979 // CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[A]], align 4
14980 // CHECK10-NEXT:    store i32 [[TMP14]], i32* [[DOTLINEAR_START]], align 4
14981 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
14982 // CHECK10:       omp.inner.for.cond10:
14983 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
14984 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
14985 // CHECK10-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
14986 // CHECK10-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
14987 // CHECK10:       omp.inner.for.body12:
14988 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
14989 // CHECK10-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 1
14990 // CHECK10-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
14991 // CHECK10-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4
14992 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A8]], align 4
14993 // CHECK10-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
14994 // CHECK10-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4
14995 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
14996 // CHECK10:       omp.body.continue16:
14997 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
14998 // CHECK10:       omp.inner.for.inc17:
14999 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
15000 // CHECK10-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
15001 // CHECK10-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
15002 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP7:![0-9]+]]
15003 // CHECK10:       omp.inner.for.end19:
15004 // CHECK10-NEXT:    store i32 10, i32* [[A]], align 4
15005 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
15006 // CHECK10-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
15007 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
15008 // CHECK10-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_IV23]], align 4
15009 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
15010 // CHECK10:       omp.inner.for.cond25:
15011 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
15012 // CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !9
15013 // CHECK10-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP21]], [[TMP22]]
15014 // CHECK10-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
15015 // CHECK10:       omp.inner.for.body27:
15016 // CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
15017 // CHECK10-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP23]], 1
15018 // CHECK10-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
15019 // CHECK10-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !9
15020 // CHECK10-NEXT:    [[TMP24:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
15021 // CHECK10-NEXT:    [[CONV:%.*]] = sext i16 [[TMP24]] to i32
15022 // CHECK10-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
15023 // CHECK10-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
15024 // CHECK10-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !9
15025 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
15026 // CHECK10:       omp.body.continue32:
15027 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
15028 // CHECK10:       omp.inner.for.inc33:
15029 // CHECK10-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
15030 // CHECK10-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP25]], 1
15031 // CHECK10-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
15032 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP10:![0-9]+]]
15033 // CHECK10:       omp.inner.for.end35:
15034 // CHECK10-NEXT:    store i32 10, i32* [[I24]], align 4
15035 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
15036 // CHECK10-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
15037 // CHECK10-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
15038 // CHECK10-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV39]], align 4
15039 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
15040 // CHECK10:       omp.inner.for.cond41:
15041 // CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
15042 // CHECK10-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !12
15043 // CHECK10-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
15044 // CHECK10-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
15045 // CHECK10:       omp.inner.for.body43:
15046 // CHECK10-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
15047 // CHECK10-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP29]], 1
15048 // CHECK10-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
15049 // CHECK10-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !12
15050 // CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
15051 // CHECK10-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP30]], 1
15052 // CHECK10-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !12
15053 // CHECK10-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
15054 // CHECK10-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP31]] to i32
15055 // CHECK10-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
15056 // CHECK10-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
15057 // CHECK10-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !12
15058 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
15059 // CHECK10:       omp.body.continue50:
15060 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
15061 // CHECK10:       omp.inner.for.inc51:
15062 // CHECK10-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
15063 // CHECK10-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP32]], 1
15064 // CHECK10-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
15065 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP13:![0-9]+]]
15066 // CHECK10:       omp.inner.for.end53:
15067 // CHECK10-NEXT:    store i32 10, i32* [[I40]], align 4
15068 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
15069 // CHECK10-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
15070 // CHECK10-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
15071 // CHECK10-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV57]], align 4
15072 // CHECK10-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 0
15073 // CHECK10-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
15074 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
15075 // CHECK10:       omp.inner.for.cond59:
15076 // CHECK10-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
15077 // CHECK10-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !15
15078 // CHECK10-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP34]], [[TMP35]]
15079 // CHECK10-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
15080 // CHECK10:       omp.inner.for.body61:
15081 // CHECK10-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
15082 // CHECK10-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP36]], 1
15083 // CHECK10-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
15084 // CHECK10-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !15
15085 // CHECK10-NEXT:    [[TMP37:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
15086 // CHECK10-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP37]], 1
15087 // CHECK10-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !15
15088 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
15089 // CHECK10-NEXT:    [[TMP38:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
15090 // CHECK10-NEXT:    [[CONV65:%.*]] = fpext float [[TMP38]] to double
15091 // CHECK10-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
15092 // CHECK10-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
15093 // CHECK10-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
15094 // CHECK10-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
15095 // CHECK10-NEXT:    [[TMP39:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !15
15096 // CHECK10-NEXT:    [[CONV69:%.*]] = fpext float [[TMP39]] to double
15097 // CHECK10-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
15098 // CHECK10-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
15099 // CHECK10-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !15
15100 // CHECK10-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
15101 // CHECK10-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i64 0, i64 2
15102 // CHECK10-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !15
15103 // CHECK10-NEXT:    [[ADD74:%.*]] = fadd double [[TMP40]], 1.000000e+00
15104 // CHECK10-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !15
15105 // CHECK10-NEXT:    [[TMP41:%.*]] = mul nsw i64 1, [[TMP4]]
15106 // CHECK10-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP41]]
15107 // CHECK10-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i64 3
15108 // CHECK10-NEXT:    [[TMP42:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !15
15109 // CHECK10-NEXT:    [[ADD77:%.*]] = fadd double [[TMP42]], 1.000000e+00
15110 // CHECK10-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !15
15111 // CHECK10-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
15112 // CHECK10-NEXT:    [[TMP43:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
15113 // CHECK10-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP43]], 1
15114 // CHECK10-NEXT:    store i64 [[ADD78]], i64* [[X]], align 8, !llvm.access.group !15
15115 // CHECK10-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
15116 // CHECK10-NEXT:    [[TMP44:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
15117 // CHECK10-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP44]] to i32
15118 // CHECK10-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
15119 // CHECK10-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
15120 // CHECK10-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 8, !llvm.access.group !15
15121 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
15122 // CHECK10:       omp.body.continue82:
15123 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
15124 // CHECK10:       omp.inner.for.inc83:
15125 // CHECK10-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
15126 // CHECK10-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP45]], 1
15127 // CHECK10-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
15128 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP16:![0-9]+]]
15129 // CHECK10:       omp.inner.for.end85:
15130 // CHECK10-NEXT:    store i32 10, i32* [[I58]], align 4
15131 // CHECK10-NEXT:    [[TMP46:%.*]] = load i32, i32* [[A]], align 4
15132 // CHECK10-NEXT:    [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
15133 // CHECK10-NEXT:    call void @llvm.stackrestore(i8* [[TMP47]])
15134 // CHECK10-NEXT:    ret i32 [[TMP46]]
15135 //
15136 //
15137 // CHECK10-LABEL: define {{[^@]+}}@_Z3bari
15138 // CHECK10-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
15139 // CHECK10-NEXT:  entry:
15140 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15141 // CHECK10-NEXT:    [[A:%.*]] = alloca i32, align 4
15142 // CHECK10-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
15143 // CHECK10-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15144 // CHECK10-NEXT:    store i32 0, i32* [[A]], align 4
15145 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
15146 // CHECK10-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
15147 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
15148 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
15149 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
15150 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15151 // CHECK10-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef signext [[TMP2]])
15152 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
15153 // CHECK10-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
15154 // CHECK10-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
15155 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
15156 // CHECK10-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
15157 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
15158 // CHECK10-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
15159 // CHECK10-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
15160 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
15161 // CHECK10-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
15162 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
15163 // CHECK10-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
15164 // CHECK10-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
15165 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
15166 // CHECK10-NEXT:    ret i32 [[TMP8]]
15167 //
15168 //
15169 // CHECK10-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
15170 // CHECK10-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
15171 // CHECK10-NEXT:  entry:
15172 // CHECK10-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
15173 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15174 // CHECK10-NEXT:    [[B:%.*]] = alloca i32, align 4
15175 // CHECK10-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
15176 // CHECK10-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
15177 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15178 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15179 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15180 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15181 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
15182 // CHECK10-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
15183 // CHECK10-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15184 // CHECK10-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
15185 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
15186 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
15187 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
15188 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
15189 // CHECK10-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
15190 // CHECK10-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
15191 // CHECK10-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
15192 // CHECK10-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
15193 // CHECK10-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
15194 // CHECK10-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
15195 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15196 // CHECK10-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
15197 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15198 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
15199 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15200 // CHECK10:       omp.inner.for.cond:
15201 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
15202 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
15203 // CHECK10-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
15204 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15205 // CHECK10:       omp.inner.for.body:
15206 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
15207 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
15208 // CHECK10-NEXT:    [[ADD2:%.*]] = add nsw i32 0, [[MUL]]
15209 // CHECK10-NEXT:    store i32 [[ADD2]], i32* [[I]], align 4, !llvm.access.group !18
15210 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
15211 // CHECK10-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP9]] to double
15212 // CHECK10-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
15213 // CHECK10-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
15214 // CHECK10-NEXT:    store double [[ADD3]], double* [[A]], align 8, !llvm.access.group !18
15215 // CHECK10-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
15216 // CHECK10-NEXT:    [[TMP10:%.*]] = load double, double* [[A4]], align 8, !llvm.access.group !18
15217 // CHECK10-NEXT:    [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00
15218 // CHECK10-NEXT:    store double [[INC]], double* [[A4]], align 8, !llvm.access.group !18
15219 // CHECK10-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
15220 // CHECK10-NEXT:    [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]]
15221 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP11]]
15222 // CHECK10-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
15223 // CHECK10-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !18
15224 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15225 // CHECK10:       omp.body.continue:
15226 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15227 // CHECK10:       omp.inner.for.inc:
15228 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
15229 // CHECK10-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP12]], 1
15230 // CHECK10-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
15231 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
15232 // CHECK10:       omp.inner.for.end:
15233 // CHECK10-NEXT:    store i32 10, i32* [[I]], align 4
15234 // CHECK10-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
15235 // CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
15236 // CHECK10-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX8]], i64 1
15237 // CHECK10-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX9]], align 2
15238 // CHECK10-NEXT:    [[CONV10:%.*]] = sext i16 [[TMP14]] to i32
15239 // CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[B]], align 4
15240 // CHECK10-NEXT:    [[ADD11:%.*]] = add nsw i32 [[CONV10]], [[TMP15]]
15241 // CHECK10-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
15242 // CHECK10-NEXT:    call void @llvm.stackrestore(i8* [[TMP16]])
15243 // CHECK10-NEXT:    ret i32 [[ADD11]]
15244 //
15245 //
15246 // CHECK10-LABEL: define {{[^@]+}}@_ZL7fstatici
15247 // CHECK10-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
15248 // CHECK10-NEXT:  entry:
15249 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15250 // CHECK10-NEXT:    [[A:%.*]] = alloca i32, align 4
15251 // CHECK10-NEXT:    [[AA:%.*]] = alloca i16, align 2
15252 // CHECK10-NEXT:    [[AAA:%.*]] = alloca i8, align 1
15253 // CHECK10-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
15254 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15255 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15256 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15257 // CHECK10-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15258 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15259 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15260 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
15261 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15262 // CHECK10-NEXT:    [[I5:%.*]] = alloca i32, align 4
15263 // CHECK10-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15264 // CHECK10-NEXT:    store i32 0, i32* [[A]], align 4
15265 // CHECK10-NEXT:    store i16 0, i16* [[AA]], align 2
15266 // CHECK10-NEXT:    store i8 0, i8* [[AAA]], align 1
15267 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
15268 // CHECK10-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
15269 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
15270 // CHECK10-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15271 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15272 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15273 // CHECK10-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
15274 // CHECK10-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
15275 // CHECK10-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
15276 // CHECK10-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
15277 // CHECK10-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
15278 // CHECK10-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15279 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15280 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15281 // CHECK10-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
15282 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15283 // CHECK10-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
15284 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15285 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15286 // CHECK10-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
15287 // CHECK10-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
15288 // CHECK10:       simd.if.then:
15289 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15290 // CHECK10-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
15291 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15292 // CHECK10:       omp.inner.for.cond:
15293 // CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
15294 // CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
15295 // CHECK10-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
15296 // CHECK10-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
15297 // CHECK10-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15298 // CHECK10:       omp.inner.for.body:
15299 // CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !21
15300 // CHECK10-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
15301 // CHECK10-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
15302 // CHECK10-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
15303 // CHECK10-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !21
15304 // CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !21
15305 // CHECK10-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
15306 // CHECK10-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !21
15307 // CHECK10-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !21
15308 // CHECK10-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
15309 // CHECK10-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
15310 // CHECK10-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
15311 // CHECK10-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !21
15312 // CHECK10-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !21
15313 // CHECK10-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
15314 // CHECK10-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
15315 // CHECK10-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
15316 // CHECK10-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !21
15317 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
15318 // CHECK10-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
15319 // CHECK10-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
15320 // CHECK10-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
15321 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15322 // CHECK10:       omp.body.continue:
15323 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15324 // CHECK10:       omp.inner.for.inc:
15325 // CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
15326 // CHECK10-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
15327 // CHECK10-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
15328 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
15329 // CHECK10:       omp.inner.for.end:
15330 // CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15331 // CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15332 // CHECK10-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15333 // CHECK10-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
15334 // CHECK10-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
15335 // CHECK10-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
15336 // CHECK10-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
15337 // CHECK10-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
15338 // CHECK10-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
15339 // CHECK10-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
15340 // CHECK10-NEXT:    br label [[SIMD_IF_END]]
15341 // CHECK10:       simd.if.end:
15342 // CHECK10-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
15343 // CHECK10-NEXT:    ret i32 [[TMP21]]
15344 //
15345 //
15346 // CHECK10-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
15347 // CHECK10-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
15348 // CHECK10-NEXT:  entry:
15349 // CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15350 // CHECK10-NEXT:    [[A:%.*]] = alloca i32, align 4
15351 // CHECK10-NEXT:    [[AA:%.*]] = alloca i16, align 2
15352 // CHECK10-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
15353 // CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15354 // CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15355 // CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15356 // CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15357 // CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
15358 // CHECK10-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15359 // CHECK10-NEXT:    store i32 0, i32* [[A]], align 4
15360 // CHECK10-NEXT:    store i16 0, i16* [[AA]], align 2
15361 // CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15362 // CHECK10-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
15363 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15364 // CHECK10-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
15365 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15366 // CHECK10:       omp.inner.for.cond:
15367 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
15368 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
15369 // CHECK10-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
15370 // CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15371 // CHECK10:       omp.inner.for.body:
15372 // CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
15373 // CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
15374 // CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15375 // CHECK10-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24
15376 // CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24
15377 // CHECK10-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
15378 // CHECK10-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !24
15379 // CHECK10-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24
15380 // CHECK10-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
15381 // CHECK10-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
15382 // CHECK10-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
15383 // CHECK10-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !24
15384 // CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
15385 // CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
15386 // CHECK10-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
15387 // CHECK10-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
15388 // CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15389 // CHECK10:       omp.body.continue:
15390 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15391 // CHECK10:       omp.inner.for.inc:
15392 // CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
15393 // CHECK10-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
15394 // CHECK10-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
15395 // CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
15396 // CHECK10:       omp.inner.for.end:
15397 // CHECK10-NEXT:    store i32 10, i32* [[I]], align 4
15398 // CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
15399 // CHECK10-NEXT:    ret i32 [[TMP8]]
15400 //
15401 //
15402 // CHECK11-LABEL: define {{[^@]+}}@_Z3fooi
15403 // CHECK11-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
15404 // CHECK11-NEXT:  entry:
15405 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15406 // CHECK11-NEXT:    [[A:%.*]] = alloca i32, align 4
15407 // CHECK11-NEXT:    [[AA:%.*]] = alloca i16, align 2
15408 // CHECK11-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
15409 // CHECK11-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
15410 // CHECK11-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
15411 // CHECK11-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
15412 // CHECK11-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
15413 // CHECK11-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
15414 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15415 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15416 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15417 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15418 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15419 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15420 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
15421 // CHECK11-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
15422 // CHECK11-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
15423 // CHECK11-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
15424 // CHECK11-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
15425 // CHECK11-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
15426 // CHECK11-NEXT:    [[A8:%.*]] = alloca i32, align 4
15427 // CHECK11-NEXT:    [[A9:%.*]] = alloca i32, align 4
15428 // CHECK11-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
15429 // CHECK11-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
15430 // CHECK11-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
15431 // CHECK11-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
15432 // CHECK11-NEXT:    [[I24:%.*]] = alloca i32, align 4
15433 // CHECK11-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
15434 // CHECK11-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
15435 // CHECK11-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
15436 // CHECK11-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
15437 // CHECK11-NEXT:    [[I40:%.*]] = alloca i32, align 4
15438 // CHECK11-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
15439 // CHECK11-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
15440 // CHECK11-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
15441 // CHECK11-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
15442 // CHECK11-NEXT:    [[I58:%.*]] = alloca i32, align 4
15443 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15444 // CHECK11-NEXT:    store i32 0, i32* [[A]], align 4
15445 // CHECK11-NEXT:    store i16 0, i16* [[AA]], align 2
15446 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
15447 // CHECK11-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
15448 // CHECK11-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
15449 // CHECK11-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
15450 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
15451 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15452 // CHECK11-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
15453 // CHECK11-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
15454 // CHECK11-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
15455 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4
15456 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
15457 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
15458 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15459 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15460 // CHECK11-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
15461 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15462 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
15463 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15464 // CHECK11:       omp.inner.for.cond:
15465 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15466 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
15467 // CHECK11-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
15468 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15469 // CHECK11:       omp.inner.for.body:
15470 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15471 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
15472 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15473 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
15474 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15475 // CHECK11:       omp.body.continue:
15476 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15477 // CHECK11:       omp.inner.for.inc:
15478 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15479 // CHECK11-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
15480 // CHECK11-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15481 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
15482 // CHECK11:       omp.inner.for.end:
15483 // CHECK11-NEXT:    store i32 10, i32* [[I]], align 4
15484 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
15485 // CHECK11-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
15486 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
15487 // CHECK11-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV7]], align 4
15488 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[A]], align 4
15489 // CHECK11-NEXT:    store i32 [[TMP12]], i32* [[DOTLINEAR_START]], align 4
15490 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
15491 // CHECK11:       omp.inner.for.cond10:
15492 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
15493 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
15494 // CHECK11-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
15495 // CHECK11-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
15496 // CHECK11:       omp.inner.for.body12:
15497 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
15498 // CHECK11-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 1
15499 // CHECK11-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
15500 // CHECK11-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4
15501 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A8]], align 4
15502 // CHECK11-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
15503 // CHECK11-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4
15504 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
15505 // CHECK11:       omp.body.continue16:
15506 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
15507 // CHECK11:       omp.inner.for.inc17:
15508 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
15509 // CHECK11-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
15510 // CHECK11-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
15511 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP8:![0-9]+]]
15512 // CHECK11:       omp.inner.for.end19:
15513 // CHECK11-NEXT:    store i32 10, i32* [[A]], align 4
15514 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
15515 // CHECK11-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
15516 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
15517 // CHECK11-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV23]], align 4
15518 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
15519 // CHECK11:       omp.inner.for.cond25:
15520 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
15521 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !10
15522 // CHECK11-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
15523 // CHECK11-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
15524 // CHECK11:       omp.inner.for.body27:
15525 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
15526 // CHECK11-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP21]], 1
15527 // CHECK11-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
15528 // CHECK11-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !10
15529 // CHECK11-NEXT:    [[TMP22:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
15530 // CHECK11-NEXT:    [[CONV:%.*]] = sext i16 [[TMP22]] to i32
15531 // CHECK11-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
15532 // CHECK11-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
15533 // CHECK11-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !10
15534 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
15535 // CHECK11:       omp.body.continue32:
15536 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
15537 // CHECK11:       omp.inner.for.inc33:
15538 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
15539 // CHECK11-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP23]], 1
15540 // CHECK11-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
15541 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP11:![0-9]+]]
15542 // CHECK11:       omp.inner.for.end35:
15543 // CHECK11-NEXT:    store i32 10, i32* [[I24]], align 4
15544 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
15545 // CHECK11-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
15546 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
15547 // CHECK11-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV39]], align 4
15548 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
15549 // CHECK11:       omp.inner.for.cond41:
15550 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
15551 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !13
15552 // CHECK11-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
15553 // CHECK11-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
15554 // CHECK11:       omp.inner.for.body43:
15555 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
15556 // CHECK11-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP27]], 1
15557 // CHECK11-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
15558 // CHECK11-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !13
15559 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
15560 // CHECK11-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP28]], 1
15561 // CHECK11-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !13
15562 // CHECK11-NEXT:    [[TMP29:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
15563 // CHECK11-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP29]] to i32
15564 // CHECK11-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
15565 // CHECK11-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
15566 // CHECK11-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !13
15567 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
15568 // CHECK11:       omp.body.continue50:
15569 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
15570 // CHECK11:       omp.inner.for.inc51:
15571 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
15572 // CHECK11-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP30]], 1
15573 // CHECK11-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
15574 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP14:![0-9]+]]
15575 // CHECK11:       omp.inner.for.end53:
15576 // CHECK11-NEXT:    store i32 10, i32* [[I40]], align 4
15577 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
15578 // CHECK11-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
15579 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
15580 // CHECK11-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV57]], align 4
15581 // CHECK11-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 0
15582 // CHECK11-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
15583 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
15584 // CHECK11:       omp.inner.for.cond59:
15585 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
15586 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !16
15587 // CHECK11-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP32]], [[TMP33]]
15588 // CHECK11-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
15589 // CHECK11:       omp.inner.for.body61:
15590 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
15591 // CHECK11-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP34]], 1
15592 // CHECK11-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
15593 // CHECK11-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !16
15594 // CHECK11-NEXT:    [[TMP35:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
15595 // CHECK11-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP35]], 1
15596 // CHECK11-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !16
15597 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
15598 // CHECK11-NEXT:    [[TMP36:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
15599 // CHECK11-NEXT:    [[CONV65:%.*]] = fpext float [[TMP36]] to double
15600 // CHECK11-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
15601 // CHECK11-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
15602 // CHECK11-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
15603 // CHECK11-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
15604 // CHECK11-NEXT:    [[TMP37:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
15605 // CHECK11-NEXT:    [[CONV69:%.*]] = fpext float [[TMP37]] to double
15606 // CHECK11-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
15607 // CHECK11-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
15608 // CHECK11-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
15609 // CHECK11-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
15610 // CHECK11-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i32 0, i32 2
15611 // CHECK11-NEXT:    [[TMP38:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
15612 // CHECK11-NEXT:    [[ADD74:%.*]] = fadd double [[TMP38]], 1.000000e+00
15613 // CHECK11-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
15614 // CHECK11-NEXT:    [[TMP39:%.*]] = mul nsw i32 1, [[TMP2]]
15615 // CHECK11-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP39]]
15616 // CHECK11-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i32 3
15617 // CHECK11-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
15618 // CHECK11-NEXT:    [[ADD77:%.*]] = fadd double [[TMP40]], 1.000000e+00
15619 // CHECK11-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
15620 // CHECK11-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
15621 // CHECK11-NEXT:    [[TMP41:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
15622 // CHECK11-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP41]], 1
15623 // CHECK11-NEXT:    store i64 [[ADD78]], i64* [[X]], align 4, !llvm.access.group !16
15624 // CHECK11-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
15625 // CHECK11-NEXT:    [[TMP42:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
15626 // CHECK11-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP42]] to i32
15627 // CHECK11-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
15628 // CHECK11-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
15629 // CHECK11-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 4, !llvm.access.group !16
15630 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
15631 // CHECK11:       omp.body.continue82:
15632 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
15633 // CHECK11:       omp.inner.for.inc83:
15634 // CHECK11-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
15635 // CHECK11-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP43]], 1
15636 // CHECK11-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
15637 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP17:![0-9]+]]
15638 // CHECK11:       omp.inner.for.end85:
15639 // CHECK11-NEXT:    store i32 10, i32* [[I58]], align 4
15640 // CHECK11-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
15641 // CHECK11-NEXT:    [[TMP45:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
15642 // CHECK11-NEXT:    call void @llvm.stackrestore(i8* [[TMP45]])
15643 // CHECK11-NEXT:    ret i32 [[TMP44]]
15644 //
15645 //
15646 // CHECK11-LABEL: define {{[^@]+}}@_Z3bari
15647 // CHECK11-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
15648 // CHECK11-NEXT:  entry:
15649 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15650 // CHECK11-NEXT:    [[A:%.*]] = alloca i32, align 4
15651 // CHECK11-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
15652 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15653 // CHECK11-NEXT:    store i32 0, i32* [[A]], align 4
15654 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
15655 // CHECK11-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
15656 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
15657 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
15658 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
15659 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15660 // CHECK11-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef [[TMP2]])
15661 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
15662 // CHECK11-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
15663 // CHECK11-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
15664 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
15665 // CHECK11-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
15666 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
15667 // CHECK11-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
15668 // CHECK11-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
15669 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
15670 // CHECK11-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
15671 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
15672 // CHECK11-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
15673 // CHECK11-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
15674 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
15675 // CHECK11-NEXT:    ret i32 [[TMP8]]
15676 //
15677 //
15678 // CHECK11-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
15679 // CHECK11-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
15680 // CHECK11-NEXT:  entry:
15681 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
15682 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15683 // CHECK11-NEXT:    [[B:%.*]] = alloca i32, align 4
15684 // CHECK11-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
15685 // CHECK11-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
15686 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15687 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15688 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15689 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15690 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
15691 // CHECK11-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
15692 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15693 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
15694 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
15695 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
15696 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
15697 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
15698 // CHECK11-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
15699 // CHECK11-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
15700 // CHECK11-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
15701 // CHECK11-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
15702 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
15703 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15704 // CHECK11-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
15705 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15706 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
15707 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15708 // CHECK11:       omp.inner.for.cond:
15709 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
15710 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
15711 // CHECK11-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
15712 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15713 // CHECK11:       omp.inner.for.body:
15714 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
15715 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
15716 // CHECK11-NEXT:    [[ADD2:%.*]] = add nsw i32 0, [[MUL]]
15717 // CHECK11-NEXT:    store i32 [[ADD2]], i32* [[I]], align 4, !llvm.access.group !19
15718 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
15719 // CHECK11-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP8]] to double
15720 // CHECK11-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
15721 // CHECK11-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
15722 // CHECK11-NEXT:    store double [[ADD3]], double* [[A]], align 4, !llvm.access.group !19
15723 // CHECK11-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
15724 // CHECK11-NEXT:    [[TMP9:%.*]] = load double, double* [[A4]], align 4, !llvm.access.group !19
15725 // CHECK11-NEXT:    [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00
15726 // CHECK11-NEXT:    store double [[INC]], double* [[A4]], align 4, !llvm.access.group !19
15727 // CHECK11-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
15728 // CHECK11-NEXT:    [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]]
15729 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP10]]
15730 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
15731 // CHECK11-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !19
15732 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15733 // CHECK11:       omp.body.continue:
15734 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15735 // CHECK11:       omp.inner.for.inc:
15736 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
15737 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
15738 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
15739 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
15740 // CHECK11:       omp.inner.for.end:
15741 // CHECK11-NEXT:    store i32 10, i32* [[I]], align 4
15742 // CHECK11-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
15743 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
15744 // CHECK11-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX8]], i32 1
15745 // CHECK11-NEXT:    [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX9]], align 2
15746 // CHECK11-NEXT:    [[CONV10:%.*]] = sext i16 [[TMP13]] to i32
15747 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[B]], align 4
15748 // CHECK11-NEXT:    [[ADD11:%.*]] = add nsw i32 [[CONV10]], [[TMP14]]
15749 // CHECK11-NEXT:    [[TMP15:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
15750 // CHECK11-NEXT:    call void @llvm.stackrestore(i8* [[TMP15]])
15751 // CHECK11-NEXT:    ret i32 [[ADD11]]
15752 //
15753 //
15754 // CHECK11-LABEL: define {{[^@]+}}@_ZL7fstatici
15755 // CHECK11-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
15756 // CHECK11-NEXT:  entry:
15757 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15758 // CHECK11-NEXT:    [[A:%.*]] = alloca i32, align 4
15759 // CHECK11-NEXT:    [[AA:%.*]] = alloca i16, align 2
15760 // CHECK11-NEXT:    [[AAA:%.*]] = alloca i8, align 1
15761 // CHECK11-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
15762 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15763 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15764 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15765 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15766 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15767 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15768 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
15769 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15770 // CHECK11-NEXT:    [[I5:%.*]] = alloca i32, align 4
15771 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15772 // CHECK11-NEXT:    store i32 0, i32* [[A]], align 4
15773 // CHECK11-NEXT:    store i16 0, i16* [[AA]], align 2
15774 // CHECK11-NEXT:    store i8 0, i8* [[AAA]], align 1
15775 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
15776 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
15777 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
15778 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15779 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15780 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15781 // CHECK11-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
15782 // CHECK11-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
15783 // CHECK11-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
15784 // CHECK11-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
15785 // CHECK11-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
15786 // CHECK11-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15787 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15788 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
15789 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
15790 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15791 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
15792 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15793 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15794 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
15795 // CHECK11-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
15796 // CHECK11:       simd.if.then:
15797 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15798 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
15799 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15800 // CHECK11:       omp.inner.for.cond:
15801 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
15802 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
15803 // CHECK11-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
15804 // CHECK11-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
15805 // CHECK11-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15806 // CHECK11:       omp.inner.for.body:
15807 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !22
15808 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
15809 // CHECK11-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
15810 // CHECK11-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
15811 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !22
15812 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !22
15813 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
15814 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !22
15815 // CHECK11-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !22
15816 // CHECK11-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
15817 // CHECK11-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
15818 // CHECK11-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
15819 // CHECK11-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !22
15820 // CHECK11-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !22
15821 // CHECK11-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
15822 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
15823 // CHECK11-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
15824 // CHECK11-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !22
15825 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
15826 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
15827 // CHECK11-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
15828 // CHECK11-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
15829 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15830 // CHECK11:       omp.body.continue:
15831 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15832 // CHECK11:       omp.inner.for.inc:
15833 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
15834 // CHECK11-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
15835 // CHECK11-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
15836 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
15837 // CHECK11:       omp.inner.for.end:
15838 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15839 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15840 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15841 // CHECK11-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
15842 // CHECK11-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
15843 // CHECK11-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
15844 // CHECK11-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
15845 // CHECK11-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
15846 // CHECK11-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
15847 // CHECK11-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
15848 // CHECK11-NEXT:    br label [[SIMD_IF_END]]
15849 // CHECK11:       simd.if.end:
15850 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
15851 // CHECK11-NEXT:    ret i32 [[TMP21]]
15852 //
15853 //
15854 // CHECK11-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
15855 // CHECK11-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
15856 // CHECK11-NEXT:  entry:
15857 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15858 // CHECK11-NEXT:    [[A:%.*]] = alloca i32, align 4
15859 // CHECK11-NEXT:    [[AA:%.*]] = alloca i16, align 2
15860 // CHECK11-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
15861 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15862 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15863 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15864 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15865 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
15866 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15867 // CHECK11-NEXT:    store i32 0, i32* [[A]], align 4
15868 // CHECK11-NEXT:    store i16 0, i16* [[AA]], align 2
15869 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15870 // CHECK11-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
15871 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15872 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
15873 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15874 // CHECK11:       omp.inner.for.cond:
15875 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
15876 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
15877 // CHECK11-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
15878 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15879 // CHECK11:       omp.inner.for.body:
15880 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
15881 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
15882 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15883 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !25
15884 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25
15885 // CHECK11-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
15886 // CHECK11-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !25
15887 // CHECK11-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25
15888 // CHECK11-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
15889 // CHECK11-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
15890 // CHECK11-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
15891 // CHECK11-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !25
15892 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
15893 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
15894 // CHECK11-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
15895 // CHECK11-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
15896 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15897 // CHECK11:       omp.body.continue:
15898 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15899 // CHECK11:       omp.inner.for.inc:
15900 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
15901 // CHECK11-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
15902 // CHECK11-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
15903 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
15904 // CHECK11:       omp.inner.for.end:
15905 // CHECK11-NEXT:    store i32 10, i32* [[I]], align 4
15906 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
15907 // CHECK11-NEXT:    ret i32 [[TMP8]]
15908 //
15909 //
15910 // CHECK12-LABEL: define {{[^@]+}}@_Z3fooi
15911 // CHECK12-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
15912 // CHECK12-NEXT:  entry:
15913 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15914 // CHECK12-NEXT:    [[A:%.*]] = alloca i32, align 4
15915 // CHECK12-NEXT:    [[AA:%.*]] = alloca i16, align 2
15916 // CHECK12-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
15917 // CHECK12-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
15918 // CHECK12-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
15919 // CHECK12-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
15920 // CHECK12-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
15921 // CHECK12-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
15922 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15923 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
15924 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15925 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15926 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15927 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15928 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
15929 // CHECK12-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
15930 // CHECK12-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
15931 // CHECK12-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
15932 // CHECK12-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
15933 // CHECK12-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
15934 // CHECK12-NEXT:    [[A8:%.*]] = alloca i32, align 4
15935 // CHECK12-NEXT:    [[A9:%.*]] = alloca i32, align 4
15936 // CHECK12-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
15937 // CHECK12-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
15938 // CHECK12-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
15939 // CHECK12-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
15940 // CHECK12-NEXT:    [[I24:%.*]] = alloca i32, align 4
15941 // CHECK12-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
15942 // CHECK12-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
15943 // CHECK12-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
15944 // CHECK12-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
15945 // CHECK12-NEXT:    [[I40:%.*]] = alloca i32, align 4
15946 // CHECK12-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
15947 // CHECK12-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
15948 // CHECK12-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
15949 // CHECK12-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
15950 // CHECK12-NEXT:    [[I58:%.*]] = alloca i32, align 4
15951 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15952 // CHECK12-NEXT:    store i32 0, i32* [[A]], align 4
15953 // CHECK12-NEXT:    store i16 0, i16* [[AA]], align 2
15954 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
15955 // CHECK12-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
15956 // CHECK12-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
15957 // CHECK12-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
15958 // CHECK12-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
15959 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15960 // CHECK12-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
15961 // CHECK12-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
15962 // CHECK12-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
15963 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4
15964 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
15965 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
15966 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_2]], align 4
15967 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15968 // CHECK12-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
15969 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15970 // CHECK12-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
15971 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15972 // CHECK12:       omp.inner.for.cond:
15973 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15974 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
15975 // CHECK12-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
15976 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15977 // CHECK12:       omp.inner.for.body:
15978 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15979 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
15980 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15981 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
15982 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15983 // CHECK12:       omp.body.continue:
15984 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15985 // CHECK12:       omp.inner.for.inc:
15986 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15987 // CHECK12-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
15988 // CHECK12-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15989 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
15990 // CHECK12:       omp.inner.for.end:
15991 // CHECK12-NEXT:    store i32 10, i32* [[I]], align 4
15992 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
15993 // CHECK12-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
15994 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
15995 // CHECK12-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV7]], align 4
15996 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[A]], align 4
15997 // CHECK12-NEXT:    store i32 [[TMP12]], i32* [[DOTLINEAR_START]], align 4
15998 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
15999 // CHECK12:       omp.inner.for.cond10:
16000 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
16001 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
16002 // CHECK12-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
16003 // CHECK12-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
16004 // CHECK12:       omp.inner.for.body12:
16005 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
16006 // CHECK12-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 1
16007 // CHECK12-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
16008 // CHECK12-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4
16009 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A8]], align 4
16010 // CHECK12-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
16011 // CHECK12-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4
16012 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
16013 // CHECK12:       omp.body.continue16:
16014 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
16015 // CHECK12:       omp.inner.for.inc17:
16016 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
16017 // CHECK12-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
16018 // CHECK12-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
16019 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP8:![0-9]+]]
16020 // CHECK12:       omp.inner.for.end19:
16021 // CHECK12-NEXT:    store i32 10, i32* [[A]], align 4
16022 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
16023 // CHECK12-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
16024 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
16025 // CHECK12-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV23]], align 4
16026 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
16027 // CHECK12:       omp.inner.for.cond25:
16028 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
16029 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !10
16030 // CHECK12-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
16031 // CHECK12-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
16032 // CHECK12:       omp.inner.for.body27:
16033 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
16034 // CHECK12-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP21]], 1
16035 // CHECK12-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
16036 // CHECK12-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !10
16037 // CHECK12-NEXT:    [[TMP22:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
16038 // CHECK12-NEXT:    [[CONV:%.*]] = sext i16 [[TMP22]] to i32
16039 // CHECK12-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
16040 // CHECK12-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
16041 // CHECK12-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !10
16042 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
16043 // CHECK12:       omp.body.continue32:
16044 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
16045 // CHECK12:       omp.inner.for.inc33:
16046 // CHECK12-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
16047 // CHECK12-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP23]], 1
16048 // CHECK12-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
16049 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP11:![0-9]+]]
16050 // CHECK12:       omp.inner.for.end35:
16051 // CHECK12-NEXT:    store i32 10, i32* [[I24]], align 4
16052 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
16053 // CHECK12-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
16054 // CHECK12-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
16055 // CHECK12-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV39]], align 4
16056 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
16057 // CHECK12:       omp.inner.for.cond41:
16058 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
16059 // CHECK12-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !13
16060 // CHECK12-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
16061 // CHECK12-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
16062 // CHECK12:       omp.inner.for.body43:
16063 // CHECK12-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
16064 // CHECK12-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP27]], 1
16065 // CHECK12-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
16066 // CHECK12-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !13
16067 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
16068 // CHECK12-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP28]], 1
16069 // CHECK12-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !13
16070 // CHECK12-NEXT:    [[TMP29:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
16071 // CHECK12-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP29]] to i32
16072 // CHECK12-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
16073 // CHECK12-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
16074 // CHECK12-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !13
16075 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
16076 // CHECK12:       omp.body.continue50:
16077 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
16078 // CHECK12:       omp.inner.for.inc51:
16079 // CHECK12-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
16080 // CHECK12-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP30]], 1
16081 // CHECK12-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
16082 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP14:![0-9]+]]
16083 // CHECK12:       omp.inner.for.end53:
16084 // CHECK12-NEXT:    store i32 10, i32* [[I40]], align 4
16085 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
16086 // CHECK12-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
16087 // CHECK12-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
16088 // CHECK12-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV57]], align 4
16089 // CHECK12-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 0
16090 // CHECK12-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
16091 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
16092 // CHECK12:       omp.inner.for.cond59:
16093 // CHECK12-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
16094 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !16
16095 // CHECK12-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP32]], [[TMP33]]
16096 // CHECK12-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
16097 // CHECK12:       omp.inner.for.body61:
16098 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
16099 // CHECK12-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP34]], 1
16100 // CHECK12-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
16101 // CHECK12-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !16
16102 // CHECK12-NEXT:    [[TMP35:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
16103 // CHECK12-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP35]], 1
16104 // CHECK12-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !16
16105 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
16106 // CHECK12-NEXT:    [[TMP36:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
16107 // CHECK12-NEXT:    [[CONV65:%.*]] = fpext float [[TMP36]] to double
16108 // CHECK12-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
16109 // CHECK12-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
16110 // CHECK12-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
16111 // CHECK12-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
16112 // CHECK12-NEXT:    [[TMP37:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
16113 // CHECK12-NEXT:    [[CONV69:%.*]] = fpext float [[TMP37]] to double
16114 // CHECK12-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
16115 // CHECK12-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
16116 // CHECK12-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
16117 // CHECK12-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
16118 // CHECK12-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i32 0, i32 2
16119 // CHECK12-NEXT:    [[TMP38:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
16120 // CHECK12-NEXT:    [[ADD74:%.*]] = fadd double [[TMP38]], 1.000000e+00
16121 // CHECK12-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
16122 // CHECK12-NEXT:    [[TMP39:%.*]] = mul nsw i32 1, [[TMP2]]
16123 // CHECK12-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP39]]
16124 // CHECK12-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i32 3
16125 // CHECK12-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
16126 // CHECK12-NEXT:    [[ADD77:%.*]] = fadd double [[TMP40]], 1.000000e+00
16127 // CHECK12-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
16128 // CHECK12-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
16129 // CHECK12-NEXT:    [[TMP41:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
16130 // CHECK12-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP41]], 1
16131 // CHECK12-NEXT:    store i64 [[ADD78]], i64* [[X]], align 4, !llvm.access.group !16
16132 // CHECK12-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
16133 // CHECK12-NEXT:    [[TMP42:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
16134 // CHECK12-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP42]] to i32
16135 // CHECK12-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
16136 // CHECK12-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
16137 // CHECK12-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 4, !llvm.access.group !16
16138 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
16139 // CHECK12:       omp.body.continue82:
16140 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
16141 // CHECK12:       omp.inner.for.inc83:
16142 // CHECK12-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
16143 // CHECK12-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP43]], 1
16144 // CHECK12-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
16145 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP17:![0-9]+]]
16146 // CHECK12:       omp.inner.for.end85:
16147 // CHECK12-NEXT:    store i32 10, i32* [[I58]], align 4
16148 // CHECK12-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
16149 // CHECK12-NEXT:    [[TMP45:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
16150 // CHECK12-NEXT:    call void @llvm.stackrestore(i8* [[TMP45]])
16151 // CHECK12-NEXT:    ret i32 [[TMP44]]
16152 //
16153 //
16154 // CHECK12-LABEL: define {{[^@]+}}@_Z3bari
16155 // CHECK12-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
16156 // CHECK12-NEXT:  entry:
16157 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16158 // CHECK12-NEXT:    [[A:%.*]] = alloca i32, align 4
16159 // CHECK12-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
16160 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16161 // CHECK12-NEXT:    store i32 0, i32* [[A]], align 4
16162 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
16163 // CHECK12-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
16164 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
16165 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
16166 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
16167 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
16168 // CHECK12-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef [[TMP2]])
16169 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
16170 // CHECK12-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
16171 // CHECK12-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
16172 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
16173 // CHECK12-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
16174 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
16175 // CHECK12-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
16176 // CHECK12-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
16177 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
16178 // CHECK12-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
16179 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
16180 // CHECK12-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
16181 // CHECK12-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
16182 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
16183 // CHECK12-NEXT:    ret i32 [[TMP8]]
16184 //
16185 //
16186 // CHECK12-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
16187 // CHECK12-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
16188 // CHECK12-NEXT:  entry:
16189 // CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
16190 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16191 // CHECK12-NEXT:    [[B:%.*]] = alloca i32, align 4
16192 // CHECK12-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
16193 // CHECK12-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
16194 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16195 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16196 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16197 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16198 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
16199 // CHECK12-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
16200 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16201 // CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
16202 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
16203 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
16204 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
16205 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
16206 // CHECK12-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
16207 // CHECK12-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
16208 // CHECK12-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
16209 // CHECK12-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
16210 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
16211 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16212 // CHECK12-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
16213 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16214 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
16215 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16216 // CHECK12:       omp.inner.for.cond:
16217 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
16218 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
16219 // CHECK12-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
16220 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16221 // CHECK12:       omp.inner.for.body:
16222 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
16223 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
16224 // CHECK12-NEXT:    [[ADD2:%.*]] = add nsw i32 0, [[MUL]]
16225 // CHECK12-NEXT:    store i32 [[ADD2]], i32* [[I]], align 4, !llvm.access.group !19
16226 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
16227 // CHECK12-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP8]] to double
16228 // CHECK12-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
16229 // CHECK12-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
16230 // CHECK12-NEXT:    store double [[ADD3]], double* [[A]], align 4, !llvm.access.group !19
16231 // CHECK12-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
16232 // CHECK12-NEXT:    [[TMP9:%.*]] = load double, double* [[A4]], align 4, !llvm.access.group !19
16233 // CHECK12-NEXT:    [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00
16234 // CHECK12-NEXT:    store double [[INC]], double* [[A4]], align 4, !llvm.access.group !19
16235 // CHECK12-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
16236 // CHECK12-NEXT:    [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]]
16237 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP10]]
16238 // CHECK12-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
16239 // CHECK12-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !19
16240 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16241 // CHECK12:       omp.body.continue:
16242 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16243 // CHECK12:       omp.inner.for.inc:
16244 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
16245 // CHECK12-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
16246 // CHECK12-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
16247 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
16248 // CHECK12:       omp.inner.for.end:
16249 // CHECK12-NEXT:    store i32 10, i32* [[I]], align 4
16250 // CHECK12-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
16251 // CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
16252 // CHECK12-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX8]], i32 1
16253 // CHECK12-NEXT:    [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX9]], align 2
16254 // CHECK12-NEXT:    [[CONV10:%.*]] = sext i16 [[TMP13]] to i32
16255 // CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[B]], align 4
16256 // CHECK12-NEXT:    [[ADD11:%.*]] = add nsw i32 [[CONV10]], [[TMP14]]
16257 // CHECK12-NEXT:    [[TMP15:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
16258 // CHECK12-NEXT:    call void @llvm.stackrestore(i8* [[TMP15]])
16259 // CHECK12-NEXT:    ret i32 [[ADD11]]
16260 //
16261 //
16262 // CHECK12-LABEL: define {{[^@]+}}@_ZL7fstatici
16263 // CHECK12-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
16264 // CHECK12-NEXT:  entry:
16265 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16266 // CHECK12-NEXT:    [[A:%.*]] = alloca i32, align 4
16267 // CHECK12-NEXT:    [[AA:%.*]] = alloca i16, align 2
16268 // CHECK12-NEXT:    [[AAA:%.*]] = alloca i8, align 1
16269 // CHECK12-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
16270 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16271 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16272 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16273 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
16274 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16275 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16276 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
16277 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16278 // CHECK12-NEXT:    [[I5:%.*]] = alloca i32, align 4
16279 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16280 // CHECK12-NEXT:    store i32 0, i32* [[A]], align 4
16281 // CHECK12-NEXT:    store i16 0, i16* [[AA]], align 2
16282 // CHECK12-NEXT:    store i8 0, i8* [[AAA]], align 1
16283 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
16284 // CHECK12-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
16285 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
16286 // CHECK12-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16287 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16288 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16289 // CHECK12-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
16290 // CHECK12-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
16291 // CHECK12-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
16292 // CHECK12-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
16293 // CHECK12-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
16294 // CHECK12-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
16295 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16296 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
16297 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
16298 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16299 // CHECK12-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
16300 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16301 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16302 // CHECK12-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
16303 // CHECK12-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
16304 // CHECK12:       simd.if.then:
16305 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16306 // CHECK12-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
16307 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16308 // CHECK12:       omp.inner.for.cond:
16309 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
16310 // CHECK12-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
16311 // CHECK12-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
16312 // CHECK12-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
16313 // CHECK12-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16314 // CHECK12:       omp.inner.for.body:
16315 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !22
16316 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
16317 // CHECK12-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
16318 // CHECK12-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
16319 // CHECK12-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !22
16320 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !22
16321 // CHECK12-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
16322 // CHECK12-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !22
16323 // CHECK12-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !22
16324 // CHECK12-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
16325 // CHECK12-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
16326 // CHECK12-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
16327 // CHECK12-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !22
16328 // CHECK12-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !22
16329 // CHECK12-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
16330 // CHECK12-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
16331 // CHECK12-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
16332 // CHECK12-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !22
16333 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
16334 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
16335 // CHECK12-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
16336 // CHECK12-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
16337 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16338 // CHECK12:       omp.body.continue:
16339 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16340 // CHECK12:       omp.inner.for.inc:
16341 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
16342 // CHECK12-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
16343 // CHECK12-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
16344 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
16345 // CHECK12:       omp.inner.for.end:
16346 // CHECK12-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16347 // CHECK12-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16348 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16349 // CHECK12-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
16350 // CHECK12-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
16351 // CHECK12-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
16352 // CHECK12-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
16353 // CHECK12-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
16354 // CHECK12-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
16355 // CHECK12-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
16356 // CHECK12-NEXT:    br label [[SIMD_IF_END]]
16357 // CHECK12:       simd.if.end:
16358 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
16359 // CHECK12-NEXT:    ret i32 [[TMP21]]
16360 //
16361 //
16362 // CHECK12-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
16363 // CHECK12-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
16364 // CHECK12-NEXT:  entry:
16365 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16366 // CHECK12-NEXT:    [[A:%.*]] = alloca i32, align 4
16367 // CHECK12-NEXT:    [[AA:%.*]] = alloca i16, align 2
16368 // CHECK12-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
16369 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16370 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16371 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16372 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16373 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
16374 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16375 // CHECK12-NEXT:    store i32 0, i32* [[A]], align 4
16376 // CHECK12-NEXT:    store i16 0, i16* [[AA]], align 2
16377 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16378 // CHECK12-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
16379 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16380 // CHECK12-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
16381 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16382 // CHECK12:       omp.inner.for.cond:
16383 // CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
16384 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
16385 // CHECK12-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
16386 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16387 // CHECK12:       omp.inner.for.body:
16388 // CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
16389 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
16390 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16391 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !25
16392 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25
16393 // CHECK12-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
16394 // CHECK12-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !25
16395 // CHECK12-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25
16396 // CHECK12-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
16397 // CHECK12-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
16398 // CHECK12-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
16399 // CHECK12-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !25
16400 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
16401 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
16402 // CHECK12-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
16403 // CHECK12-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
16404 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16405 // CHECK12:       omp.body.continue:
16406 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16407 // CHECK12:       omp.inner.for.inc:
16408 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
16409 // CHECK12-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
16410 // CHECK12-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
16411 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
16412 // CHECK12:       omp.inner.for.end:
16413 // CHECK12-NEXT:    store i32 10, i32* [[I]], align 4
16414 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
16415 // CHECK12-NEXT:    ret i32 [[TMP8]]
16416 //
16417 //
16418 // CHECK13-LABEL: define {{[^@]+}}@_Z3fooi
16419 // CHECK13-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
16420 // CHECK13-NEXT:  entry:
16421 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16422 // CHECK13-NEXT:    [[A:%.*]] = alloca i32, align 4
16423 // CHECK13-NEXT:    [[AA:%.*]] = alloca i16, align 2
16424 // CHECK13-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
16425 // CHECK13-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
16426 // CHECK13-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
16427 // CHECK13-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
16428 // CHECK13-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
16429 // CHECK13-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
16430 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16431 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
16432 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16433 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16434 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16435 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16436 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
16437 // CHECK13-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
16438 // CHECK13-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
16439 // CHECK13-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
16440 // CHECK13-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
16441 // CHECK13-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
16442 // CHECK13-NEXT:    [[A8:%.*]] = alloca i32, align 4
16443 // CHECK13-NEXT:    [[A9:%.*]] = alloca i32, align 4
16444 // CHECK13-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
16445 // CHECK13-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
16446 // CHECK13-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
16447 // CHECK13-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
16448 // CHECK13-NEXT:    [[I24:%.*]] = alloca i32, align 4
16449 // CHECK13-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
16450 // CHECK13-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
16451 // CHECK13-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
16452 // CHECK13-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
16453 // CHECK13-NEXT:    [[I40:%.*]] = alloca i32, align 4
16454 // CHECK13-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
16455 // CHECK13-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
16456 // CHECK13-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
16457 // CHECK13-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
16458 // CHECK13-NEXT:    [[I58:%.*]] = alloca i32, align 4
16459 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16460 // CHECK13-NEXT:    store i32 0, i32* [[A]], align 4
16461 // CHECK13-NEXT:    store i16 0, i16* [[AA]], align 2
16462 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
16463 // CHECK13-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
16464 // CHECK13-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
16465 // CHECK13-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
16466 // CHECK13-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
16467 // CHECK13-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
16468 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
16469 // CHECK13-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
16470 // CHECK13-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
16471 // CHECK13-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
16472 // CHECK13-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
16473 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[A]], align 4
16474 // CHECK13-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
16475 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
16476 // CHECK13-NEXT:    store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_2]], align 4
16477 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16478 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
16479 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16480 // CHECK13-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
16481 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16482 // CHECK13:       omp.inner.for.cond:
16483 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
16484 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
16485 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
16486 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16487 // CHECK13:       omp.inner.for.body:
16488 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
16489 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
16490 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16491 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
16492 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16493 // CHECK13:       omp.body.continue:
16494 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16495 // CHECK13:       omp.inner.for.inc:
16496 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
16497 // CHECK13-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
16498 // CHECK13-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
16499 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
16500 // CHECK13:       omp.inner.for.end:
16501 // CHECK13-NEXT:    store i32 10, i32* [[I]], align 4
16502 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
16503 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
16504 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
16505 // CHECK13-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV7]], align 4
16506 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[A]], align 4
16507 // CHECK13-NEXT:    store i32 [[TMP14]], i32* [[DOTLINEAR_START]], align 4
16508 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
16509 // CHECK13:       omp.inner.for.cond10:
16510 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
16511 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
16512 // CHECK13-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
16513 // CHECK13-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
16514 // CHECK13:       omp.inner.for.body12:
16515 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
16516 // CHECK13-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 1
16517 // CHECK13-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
16518 // CHECK13-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4, !nontemporal !7
16519 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A8]], align 4, !nontemporal !7
16520 // CHECK13-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
16521 // CHECK13-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4, !nontemporal !7
16522 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
16523 // CHECK13:       omp.body.continue16:
16524 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
16525 // CHECK13:       omp.inner.for.inc17:
16526 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
16527 // CHECK13-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
16528 // CHECK13-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
16529 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP8:![0-9]+]]
16530 // CHECK13:       omp.inner.for.end19:
16531 // CHECK13-NEXT:    store i32 10, i32* [[A]], align 4
16532 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
16533 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
16534 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
16535 // CHECK13-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_IV23]], align 4
16536 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
16537 // CHECK13:       omp.inner.for.cond25:
16538 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
16539 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !10
16540 // CHECK13-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP21]], [[TMP22]]
16541 // CHECK13-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
16542 // CHECK13:       omp.inner.for.body27:
16543 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
16544 // CHECK13-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP23]], 1
16545 // CHECK13-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
16546 // CHECK13-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !10
16547 // CHECK13-NEXT:    [[TMP24:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
16548 // CHECK13-NEXT:    [[CONV:%.*]] = sext i16 [[TMP24]] to i32
16549 // CHECK13-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
16550 // CHECK13-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
16551 // CHECK13-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !10
16552 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
16553 // CHECK13:       omp.body.continue32:
16554 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
16555 // CHECK13:       omp.inner.for.inc33:
16556 // CHECK13-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
16557 // CHECK13-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP25]], 1
16558 // CHECK13-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
16559 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP11:![0-9]+]]
16560 // CHECK13:       omp.inner.for.end35:
16561 // CHECK13-NEXT:    store i32 10, i32* [[I24]], align 4
16562 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
16563 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
16564 // CHECK13-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
16565 // CHECK13-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV39]], align 4
16566 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
16567 // CHECK13:       omp.inner.for.cond41:
16568 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
16569 // CHECK13-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !13
16570 // CHECK13-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
16571 // CHECK13-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
16572 // CHECK13:       omp.inner.for.body43:
16573 // CHECK13-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
16574 // CHECK13-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP29]], 1
16575 // CHECK13-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
16576 // CHECK13-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !13
16577 // CHECK13-NEXT:    [[TMP30:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
16578 // CHECK13-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP30]], 1
16579 // CHECK13-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !13
16580 // CHECK13-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
16581 // CHECK13-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP31]] to i32
16582 // CHECK13-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
16583 // CHECK13-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
16584 // CHECK13-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !13
16585 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
16586 // CHECK13:       omp.body.continue50:
16587 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
16588 // CHECK13:       omp.inner.for.inc51:
16589 // CHECK13-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
16590 // CHECK13-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP32]], 1
16591 // CHECK13-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
16592 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP14:![0-9]+]]
16593 // CHECK13:       omp.inner.for.end53:
16594 // CHECK13-NEXT:    store i32 10, i32* [[I40]], align 4
16595 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
16596 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
16597 // CHECK13-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
16598 // CHECK13-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV57]], align 4
16599 // CHECK13-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 0
16600 // CHECK13-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
16601 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
16602 // CHECK13:       omp.inner.for.cond59:
16603 // CHECK13-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
16604 // CHECK13-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !16
16605 // CHECK13-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP34]], [[TMP35]]
16606 // CHECK13-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
16607 // CHECK13:       omp.inner.for.body61:
16608 // CHECK13-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
16609 // CHECK13-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP36]], 1
16610 // CHECK13-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
16611 // CHECK13-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !16
16612 // CHECK13-NEXT:    [[TMP37:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
16613 // CHECK13-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP37]], 1
16614 // CHECK13-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !16
16615 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
16616 // CHECK13-NEXT:    [[TMP38:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
16617 // CHECK13-NEXT:    [[CONV65:%.*]] = fpext float [[TMP38]] to double
16618 // CHECK13-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
16619 // CHECK13-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
16620 // CHECK13-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
16621 // CHECK13-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
16622 // CHECK13-NEXT:    [[TMP39:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
16623 // CHECK13-NEXT:    [[CONV69:%.*]] = fpext float [[TMP39]] to double
16624 // CHECK13-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
16625 // CHECK13-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
16626 // CHECK13-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
16627 // CHECK13-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
16628 // CHECK13-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i64 0, i64 2
16629 // CHECK13-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
16630 // CHECK13-NEXT:    [[ADD74:%.*]] = fadd double [[TMP40]], 1.000000e+00
16631 // CHECK13-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
16632 // CHECK13-NEXT:    [[TMP41:%.*]] = mul nsw i64 1, [[TMP4]]
16633 // CHECK13-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP41]]
16634 // CHECK13-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i64 3
16635 // CHECK13-NEXT:    [[TMP42:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
16636 // CHECK13-NEXT:    [[ADD77:%.*]] = fadd double [[TMP42]], 1.000000e+00
16637 // CHECK13-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
16638 // CHECK13-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
16639 // CHECK13-NEXT:    [[TMP43:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !16
16640 // CHECK13-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP43]], 1
16641 // CHECK13-NEXT:    store i64 [[ADD78]], i64* [[X]], align 8, !llvm.access.group !16
16642 // CHECK13-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
16643 // CHECK13-NEXT:    [[TMP44:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !16
16644 // CHECK13-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP44]] to i32
16645 // CHECK13-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
16646 // CHECK13-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
16647 // CHECK13-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 8, !llvm.access.group !16
16648 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
16649 // CHECK13:       omp.body.continue82:
16650 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
16651 // CHECK13:       omp.inner.for.inc83:
16652 // CHECK13-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
16653 // CHECK13-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP45]], 1
16654 // CHECK13-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
16655 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP17:![0-9]+]]
16656 // CHECK13:       omp.inner.for.end85:
16657 // CHECK13-NEXT:    store i32 10, i32* [[I58]], align 4
16658 // CHECK13-NEXT:    [[TMP46:%.*]] = load i32, i32* [[A]], align 4
16659 // CHECK13-NEXT:    [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
16660 // CHECK13-NEXT:    call void @llvm.stackrestore(i8* [[TMP47]])
16661 // CHECK13-NEXT:    ret i32 [[TMP46]]
16662 //
16663 //
16664 // CHECK13-LABEL: define {{[^@]+}}@_Z3bari
16665 // CHECK13-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
16666 // CHECK13-NEXT:  entry:
16667 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16668 // CHECK13-NEXT:    [[A:%.*]] = alloca i32, align 4
16669 // CHECK13-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
16670 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16671 // CHECK13-NEXT:    store i32 0, i32* [[A]], align 4
16672 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
16673 // CHECK13-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
16674 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
16675 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
16676 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
16677 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
16678 // CHECK13-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef signext [[TMP2]])
16679 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
16680 // CHECK13-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
16681 // CHECK13-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
16682 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
16683 // CHECK13-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
16684 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
16685 // CHECK13-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
16686 // CHECK13-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
16687 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
16688 // CHECK13-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
16689 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
16690 // CHECK13-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
16691 // CHECK13-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
16692 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
16693 // CHECK13-NEXT:    ret i32 [[TMP8]]
16694 //
16695 //
16696 // CHECK13-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
16697 // CHECK13-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
16698 // CHECK13-NEXT:  entry:
16699 // CHECK13-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
16700 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16701 // CHECK13-NEXT:    [[B:%.*]] = alloca i32, align 4
16702 // CHECK13-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
16703 // CHECK13-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
16704 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
16705 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16706 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16707 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16708 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16709 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
16710 // CHECK13-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
16711 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16712 // CHECK13-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
16713 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
16714 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
16715 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
16716 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
16717 // CHECK13-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
16718 // CHECK13-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
16719 // CHECK13-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
16720 // CHECK13-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
16721 // CHECK13-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
16722 // CHECK13-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
16723 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
16724 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
16725 // CHECK13-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
16726 // CHECK13-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
16727 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16728 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
16729 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16730 // CHECK13-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
16731 // CHECK13-NEXT:    [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
16732 // CHECK13-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
16733 // CHECK13-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
16734 // CHECK13:       omp_if.then:
16735 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16736 // CHECK13:       omp.inner.for.cond:
16737 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
16738 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
16739 // CHECK13-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
16740 // CHECK13-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16741 // CHECK13:       omp.inner.for.body:
16742 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
16743 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
16744 // CHECK13-NEXT:    [[ADD3:%.*]] = add nsw i32 0, [[MUL]]
16745 // CHECK13-NEXT:    store i32 [[ADD3]], i32* [[I]], align 4, !llvm.access.group !19
16746 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
16747 // CHECK13-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP11]] to double
16748 // CHECK13-NEXT:    [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00
16749 // CHECK13-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
16750 // CHECK13-NEXT:    store double [[ADD4]], double* [[A]], align 8, !llvm.access.group !19
16751 // CHECK13-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
16752 // CHECK13-NEXT:    [[TMP12:%.*]] = load double, double* [[A5]], align 8, !llvm.access.group !19
16753 // CHECK13-NEXT:    [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00
16754 // CHECK13-NEXT:    store double [[INC]], double* [[A5]], align 8, !llvm.access.group !19
16755 // CHECK13-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
16756 // CHECK13-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
16757 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
16758 // CHECK13-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
16759 // CHECK13-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !19
16760 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16761 // CHECK13:       omp.body.continue:
16762 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16763 // CHECK13:       omp.inner.for.inc:
16764 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
16765 // CHECK13-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP14]], 1
16766 // CHECK13-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
16767 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
16768 // CHECK13:       omp.inner.for.end:
16769 // CHECK13-NEXT:    br label [[OMP_IF_END:%.*]]
16770 // CHECK13:       omp_if.else:
16771 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
16772 // CHECK13:       omp.inner.for.cond9:
16773 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16774 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
16775 // CHECK13-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
16776 // CHECK13-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END25:%.*]]
16777 // CHECK13:       omp.inner.for.body11:
16778 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16779 // CHECK13-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP17]], 1
16780 // CHECK13-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
16781 // CHECK13-NEXT:    store i32 [[ADD13]], i32* [[I]], align 4
16782 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[B]], align 4
16783 // CHECK13-NEXT:    [[CONV14:%.*]] = sitofp i32 [[TMP18]] to double
16784 // CHECK13-NEXT:    [[ADD15:%.*]] = fadd double [[CONV14]], 1.500000e+00
16785 // CHECK13-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
16786 // CHECK13-NEXT:    store double [[ADD15]], double* [[A16]], align 8
16787 // CHECK13-NEXT:    [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
16788 // CHECK13-NEXT:    [[TMP19:%.*]] = load double, double* [[A17]], align 8
16789 // CHECK13-NEXT:    [[INC18:%.*]] = fadd double [[TMP19]], 1.000000e+00
16790 // CHECK13-NEXT:    store double [[INC18]], double* [[A17]], align 8
16791 // CHECK13-NEXT:    [[CONV19:%.*]] = fptosi double [[INC18]] to i16
16792 // CHECK13-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP2]]
16793 // CHECK13-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP20]]
16794 // CHECK13-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX20]], i64 1
16795 // CHECK13-NEXT:    store i16 [[CONV19]], i16* [[ARRAYIDX21]], align 2
16796 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE22:%.*]]
16797 // CHECK13:       omp.body.continue22:
16798 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC23:%.*]]
16799 // CHECK13:       omp.inner.for.inc23:
16800 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
16801 // CHECK13-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP21]], 1
16802 // CHECK13-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4
16803 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP22:![0-9]+]]
16804 // CHECK13:       omp.inner.for.end25:
16805 // CHECK13-NEXT:    br label [[OMP_IF_END]]
16806 // CHECK13:       omp_if.end:
16807 // CHECK13-NEXT:    store i32 10, i32* [[I]], align 4
16808 // CHECK13-NEXT:    [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]]
16809 // CHECK13-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP22]]
16810 // CHECK13-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i64 1
16811 // CHECK13-NEXT:    [[TMP23:%.*]] = load i16, i16* [[ARRAYIDX27]], align 2
16812 // CHECK13-NEXT:    [[CONV28:%.*]] = sext i16 [[TMP23]] to i32
16813 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[B]], align 4
16814 // CHECK13-NEXT:    [[ADD29:%.*]] = add nsw i32 [[CONV28]], [[TMP24]]
16815 // CHECK13-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
16816 // CHECK13-NEXT:    call void @llvm.stackrestore(i8* [[TMP25]])
16817 // CHECK13-NEXT:    ret i32 [[ADD29]]
16818 //
16819 //
16820 // CHECK13-LABEL: define {{[^@]+}}@_ZL7fstatici
16821 // CHECK13-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
16822 // CHECK13-NEXT:  entry:
16823 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16824 // CHECK13-NEXT:    [[A:%.*]] = alloca i32, align 4
16825 // CHECK13-NEXT:    [[AA:%.*]] = alloca i16, align 2
16826 // CHECK13-NEXT:    [[AAA:%.*]] = alloca i8, align 1
16827 // CHECK13-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
16828 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16829 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16830 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16831 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
16832 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16833 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16834 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
16835 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16836 // CHECK13-NEXT:    [[I5:%.*]] = alloca i32, align 4
16837 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16838 // CHECK13-NEXT:    store i32 0, i32* [[A]], align 4
16839 // CHECK13-NEXT:    store i16 0, i16* [[AA]], align 2
16840 // CHECK13-NEXT:    store i8 0, i8* [[AAA]], align 1
16841 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
16842 // CHECK13-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
16843 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
16844 // CHECK13-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
16845 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16846 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16847 // CHECK13-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
16848 // CHECK13-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
16849 // CHECK13-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
16850 // CHECK13-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
16851 // CHECK13-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
16852 // CHECK13-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
16853 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16854 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
16855 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
16856 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16857 // CHECK13-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
16858 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16859 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16860 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
16861 // CHECK13-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
16862 // CHECK13:       simd.if.then:
16863 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16864 // CHECK13-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
16865 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16866 // CHECK13:       omp.inner.for.cond:
16867 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
16868 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
16869 // CHECK13-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
16870 // CHECK13-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
16871 // CHECK13-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16872 // CHECK13:       omp.inner.for.body:
16873 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !24
16874 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
16875 // CHECK13-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
16876 // CHECK13-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
16877 // CHECK13-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !24
16878 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24
16879 // CHECK13-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
16880 // CHECK13-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !24
16881 // CHECK13-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24
16882 // CHECK13-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
16883 // CHECK13-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
16884 // CHECK13-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
16885 // CHECK13-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !24
16886 // CHECK13-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !24
16887 // CHECK13-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
16888 // CHECK13-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
16889 // CHECK13-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
16890 // CHECK13-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !24
16891 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
16892 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
16893 // CHECK13-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
16894 // CHECK13-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
16895 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16896 // CHECK13:       omp.body.continue:
16897 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16898 // CHECK13:       omp.inner.for.inc:
16899 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
16900 // CHECK13-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
16901 // CHECK13-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
16902 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
16903 // CHECK13:       omp.inner.for.end:
16904 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16905 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
16906 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
16907 // CHECK13-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
16908 // CHECK13-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
16909 // CHECK13-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
16910 // CHECK13-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
16911 // CHECK13-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
16912 // CHECK13-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
16913 // CHECK13-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
16914 // CHECK13-NEXT:    br label [[SIMD_IF_END]]
16915 // CHECK13:       simd.if.end:
16916 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
16917 // CHECK13-NEXT:    ret i32 [[TMP21]]
16918 //
16919 //
16920 // CHECK13-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
16921 // CHECK13-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
16922 // CHECK13-NEXT:  entry:
16923 // CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16924 // CHECK13-NEXT:    [[A:%.*]] = alloca i32, align 4
16925 // CHECK13-NEXT:    [[AA:%.*]] = alloca i16, align 2
16926 // CHECK13-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
16927 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16928 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16929 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16930 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16931 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
16932 // CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16933 // CHECK13-NEXT:    store i32 0, i32* [[A]], align 4
16934 // CHECK13-NEXT:    store i16 0, i16* [[AA]], align 2
16935 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16936 // CHECK13-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
16937 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16938 // CHECK13-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
16939 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16940 // CHECK13:       omp.inner.for.cond:
16941 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
16942 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
16943 // CHECK13-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
16944 // CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16945 // CHECK13:       omp.inner.for.body:
16946 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
16947 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
16948 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16949 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !27
16950 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !27
16951 // CHECK13-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
16952 // CHECK13-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !27
16953 // CHECK13-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !27
16954 // CHECK13-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
16955 // CHECK13-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
16956 // CHECK13-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
16957 // CHECK13-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !27
16958 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
16959 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
16960 // CHECK13-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
16961 // CHECK13-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
16962 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16963 // CHECK13:       omp.body.continue:
16964 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16965 // CHECK13:       omp.inner.for.inc:
16966 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
16967 // CHECK13-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
16968 // CHECK13-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
16969 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
16970 // CHECK13:       omp.inner.for.end:
16971 // CHECK13-NEXT:    store i32 10, i32* [[I]], align 4
16972 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
16973 // CHECK13-NEXT:    ret i32 [[TMP8]]
16974 //
16975 //
16976 // CHECK14-LABEL: define {{[^@]+}}@_Z3fooi
16977 // CHECK14-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
16978 // CHECK14-NEXT:  entry:
16979 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16980 // CHECK14-NEXT:    [[A:%.*]] = alloca i32, align 4
16981 // CHECK14-NEXT:    [[AA:%.*]] = alloca i16, align 2
16982 // CHECK14-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
16983 // CHECK14-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
16984 // CHECK14-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
16985 // CHECK14-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
16986 // CHECK14-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
16987 // CHECK14-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
16988 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16989 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
16990 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16991 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16992 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16993 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16994 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
16995 // CHECK14-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
16996 // CHECK14-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
16997 // CHECK14-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
16998 // CHECK14-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
16999 // CHECK14-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
17000 // CHECK14-NEXT:    [[A8:%.*]] = alloca i32, align 4
17001 // CHECK14-NEXT:    [[A9:%.*]] = alloca i32, align 4
17002 // CHECK14-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
17003 // CHECK14-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
17004 // CHECK14-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
17005 // CHECK14-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
17006 // CHECK14-NEXT:    [[I24:%.*]] = alloca i32, align 4
17007 // CHECK14-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
17008 // CHECK14-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
17009 // CHECK14-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
17010 // CHECK14-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
17011 // CHECK14-NEXT:    [[I40:%.*]] = alloca i32, align 4
17012 // CHECK14-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
17013 // CHECK14-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
17014 // CHECK14-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
17015 // CHECK14-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
17016 // CHECK14-NEXT:    [[I58:%.*]] = alloca i32, align 4
17017 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17018 // CHECK14-NEXT:    store i32 0, i32* [[A]], align 4
17019 // CHECK14-NEXT:    store i16 0, i16* [[AA]], align 2
17020 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
17021 // CHECK14-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
17022 // CHECK14-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
17023 // CHECK14-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
17024 // CHECK14-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
17025 // CHECK14-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
17026 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
17027 // CHECK14-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
17028 // CHECK14-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
17029 // CHECK14-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
17030 // CHECK14-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
17031 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[A]], align 4
17032 // CHECK14-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
17033 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
17034 // CHECK14-NEXT:    store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17035 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17036 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
17037 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17038 // CHECK14-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
17039 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17040 // CHECK14:       omp.inner.for.cond:
17041 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
17042 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
17043 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
17044 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17045 // CHECK14:       omp.inner.for.body:
17046 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
17047 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
17048 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17049 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
17050 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17051 // CHECK14:       omp.body.continue:
17052 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17053 // CHECK14:       omp.inner.for.inc:
17054 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
17055 // CHECK14-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
17056 // CHECK14-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
17057 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
17058 // CHECK14:       omp.inner.for.end:
17059 // CHECK14-NEXT:    store i32 10, i32* [[I]], align 4
17060 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
17061 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
17062 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
17063 // CHECK14-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV7]], align 4
17064 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[A]], align 4
17065 // CHECK14-NEXT:    store i32 [[TMP14]], i32* [[DOTLINEAR_START]], align 4
17066 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
17067 // CHECK14:       omp.inner.for.cond10:
17068 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
17069 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
17070 // CHECK14-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
17071 // CHECK14-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
17072 // CHECK14:       omp.inner.for.body12:
17073 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
17074 // CHECK14-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 1
17075 // CHECK14-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
17076 // CHECK14-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4, !nontemporal !7
17077 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A8]], align 4, !nontemporal !7
17078 // CHECK14-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
17079 // CHECK14-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4, !nontemporal !7
17080 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
17081 // CHECK14:       omp.body.continue16:
17082 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
17083 // CHECK14:       omp.inner.for.inc17:
17084 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
17085 // CHECK14-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
17086 // CHECK14-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
17087 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP8:![0-9]+]]
17088 // CHECK14:       omp.inner.for.end19:
17089 // CHECK14-NEXT:    store i32 10, i32* [[A]], align 4
17090 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
17091 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
17092 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
17093 // CHECK14-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_IV23]], align 4
17094 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
17095 // CHECK14:       omp.inner.for.cond25:
17096 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
17097 // CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !10
17098 // CHECK14-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP21]], [[TMP22]]
17099 // CHECK14-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
17100 // CHECK14:       omp.inner.for.body27:
17101 // CHECK14-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
17102 // CHECK14-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP23]], 1
17103 // CHECK14-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
17104 // CHECK14-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !10
17105 // CHECK14-NEXT:    [[TMP24:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
17106 // CHECK14-NEXT:    [[CONV:%.*]] = sext i16 [[TMP24]] to i32
17107 // CHECK14-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
17108 // CHECK14-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
17109 // CHECK14-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !10
17110 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
17111 // CHECK14:       omp.body.continue32:
17112 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
17113 // CHECK14:       omp.inner.for.inc33:
17114 // CHECK14-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
17115 // CHECK14-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP25]], 1
17116 // CHECK14-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
17117 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP11:![0-9]+]]
17118 // CHECK14:       omp.inner.for.end35:
17119 // CHECK14-NEXT:    store i32 10, i32* [[I24]], align 4
17120 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
17121 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
17122 // CHECK14-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
17123 // CHECK14-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV39]], align 4
17124 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
17125 // CHECK14:       omp.inner.for.cond41:
17126 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
17127 // CHECK14-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !13
17128 // CHECK14-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
17129 // CHECK14-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
17130 // CHECK14:       omp.inner.for.body43:
17131 // CHECK14-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
17132 // CHECK14-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP29]], 1
17133 // CHECK14-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
17134 // CHECK14-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !13
17135 // CHECK14-NEXT:    [[TMP30:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
17136 // CHECK14-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP30]], 1
17137 // CHECK14-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !13
17138 // CHECK14-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
17139 // CHECK14-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP31]] to i32
17140 // CHECK14-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
17141 // CHECK14-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
17142 // CHECK14-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !13
17143 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
17144 // CHECK14:       omp.body.continue50:
17145 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
17146 // CHECK14:       omp.inner.for.inc51:
17147 // CHECK14-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
17148 // CHECK14-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP32]], 1
17149 // CHECK14-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
17150 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP14:![0-9]+]]
17151 // CHECK14:       omp.inner.for.end53:
17152 // CHECK14-NEXT:    store i32 10, i32* [[I40]], align 4
17153 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
17154 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
17155 // CHECK14-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
17156 // CHECK14-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV57]], align 4
17157 // CHECK14-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 0
17158 // CHECK14-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
17159 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
17160 // CHECK14:       omp.inner.for.cond59:
17161 // CHECK14-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
17162 // CHECK14-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !16
17163 // CHECK14-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP34]], [[TMP35]]
17164 // CHECK14-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
17165 // CHECK14:       omp.inner.for.body61:
17166 // CHECK14-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
17167 // CHECK14-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP36]], 1
17168 // CHECK14-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
17169 // CHECK14-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !16
17170 // CHECK14-NEXT:    [[TMP37:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
17171 // CHECK14-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP37]], 1
17172 // CHECK14-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !16
17173 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
17174 // CHECK14-NEXT:    [[TMP38:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
17175 // CHECK14-NEXT:    [[CONV65:%.*]] = fpext float [[TMP38]] to double
17176 // CHECK14-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
17177 // CHECK14-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
17178 // CHECK14-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
17179 // CHECK14-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
17180 // CHECK14-NEXT:    [[TMP39:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
17181 // CHECK14-NEXT:    [[CONV69:%.*]] = fpext float [[TMP39]] to double
17182 // CHECK14-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
17183 // CHECK14-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
17184 // CHECK14-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
17185 // CHECK14-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
17186 // CHECK14-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i64 0, i64 2
17187 // CHECK14-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
17188 // CHECK14-NEXT:    [[ADD74:%.*]] = fadd double [[TMP40]], 1.000000e+00
17189 // CHECK14-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
17190 // CHECK14-NEXT:    [[TMP41:%.*]] = mul nsw i64 1, [[TMP4]]
17191 // CHECK14-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP41]]
17192 // CHECK14-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i64 3
17193 // CHECK14-NEXT:    [[TMP42:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
17194 // CHECK14-NEXT:    [[ADD77:%.*]] = fadd double [[TMP42]], 1.000000e+00
17195 // CHECK14-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
17196 // CHECK14-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
17197 // CHECK14-NEXT:    [[TMP43:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !16
17198 // CHECK14-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP43]], 1
17199 // CHECK14-NEXT:    store i64 [[ADD78]], i64* [[X]], align 8, !llvm.access.group !16
17200 // CHECK14-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
17201 // CHECK14-NEXT:    [[TMP44:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !16
17202 // CHECK14-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP44]] to i32
17203 // CHECK14-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
17204 // CHECK14-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
17205 // CHECK14-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 8, !llvm.access.group !16
17206 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
17207 // CHECK14:       omp.body.continue82:
17208 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
17209 // CHECK14:       omp.inner.for.inc83:
17210 // CHECK14-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
17211 // CHECK14-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP45]], 1
17212 // CHECK14-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
17213 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP17:![0-9]+]]
17214 // CHECK14:       omp.inner.for.end85:
17215 // CHECK14-NEXT:    store i32 10, i32* [[I58]], align 4
17216 // CHECK14-NEXT:    [[TMP46:%.*]] = load i32, i32* [[A]], align 4
17217 // CHECK14-NEXT:    [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
17218 // CHECK14-NEXT:    call void @llvm.stackrestore(i8* [[TMP47]])
17219 // CHECK14-NEXT:    ret i32 [[TMP46]]
17220 //
17221 //
17222 // CHECK14-LABEL: define {{[^@]+}}@_Z3bari
17223 // CHECK14-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
17224 // CHECK14-NEXT:  entry:
17225 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17226 // CHECK14-NEXT:    [[A:%.*]] = alloca i32, align 4
17227 // CHECK14-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
17228 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17229 // CHECK14-NEXT:    store i32 0, i32* [[A]], align 4
17230 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
17231 // CHECK14-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
17232 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
17233 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
17234 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
17235 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17236 // CHECK14-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef signext [[TMP2]])
17237 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
17238 // CHECK14-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
17239 // CHECK14-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
17240 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
17241 // CHECK14-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
17242 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
17243 // CHECK14-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
17244 // CHECK14-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
17245 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
17246 // CHECK14-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
17247 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
17248 // CHECK14-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
17249 // CHECK14-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
17250 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
17251 // CHECK14-NEXT:    ret i32 [[TMP8]]
17252 //
17253 //
17254 // CHECK14-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
17255 // CHECK14-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
17256 // CHECK14-NEXT:  entry:
17257 // CHECK14-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
17258 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17259 // CHECK14-NEXT:    [[B:%.*]] = alloca i32, align 4
17260 // CHECK14-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
17261 // CHECK14-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
17262 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
17263 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17264 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17265 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17266 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17267 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
17268 // CHECK14-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
17269 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17270 // CHECK14-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
17271 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
17272 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
17273 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
17274 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
17275 // CHECK14-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
17276 // CHECK14-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
17277 // CHECK14-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
17278 // CHECK14-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
17279 // CHECK14-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
17280 // CHECK14-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
17281 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
17282 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
17283 // CHECK14-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
17284 // CHECK14-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
17285 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17286 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
17287 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17288 // CHECK14-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
17289 // CHECK14-NEXT:    [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
17290 // CHECK14-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
17291 // CHECK14-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
17292 // CHECK14:       omp_if.then:
17293 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17294 // CHECK14:       omp.inner.for.cond:
17295 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
17296 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
17297 // CHECK14-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
17298 // CHECK14-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17299 // CHECK14:       omp.inner.for.body:
17300 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
17301 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
17302 // CHECK14-NEXT:    [[ADD3:%.*]] = add nsw i32 0, [[MUL]]
17303 // CHECK14-NEXT:    store i32 [[ADD3]], i32* [[I]], align 4, !llvm.access.group !19
17304 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
17305 // CHECK14-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP11]] to double
17306 // CHECK14-NEXT:    [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00
17307 // CHECK14-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
17308 // CHECK14-NEXT:    store double [[ADD4]], double* [[A]], align 8, !llvm.access.group !19
17309 // CHECK14-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
17310 // CHECK14-NEXT:    [[TMP12:%.*]] = load double, double* [[A5]], align 8, !llvm.access.group !19
17311 // CHECK14-NEXT:    [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00
17312 // CHECK14-NEXT:    store double [[INC]], double* [[A5]], align 8, !llvm.access.group !19
17313 // CHECK14-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
17314 // CHECK14-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
17315 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
17316 // CHECK14-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
17317 // CHECK14-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !19
17318 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17319 // CHECK14:       omp.body.continue:
17320 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17321 // CHECK14:       omp.inner.for.inc:
17322 // CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
17323 // CHECK14-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP14]], 1
17324 // CHECK14-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
17325 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
17326 // CHECK14:       omp.inner.for.end:
17327 // CHECK14-NEXT:    br label [[OMP_IF_END:%.*]]
17328 // CHECK14:       omp_if.else:
17329 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
17330 // CHECK14:       omp.inner.for.cond9:
17331 // CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17332 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17333 // CHECK14-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
17334 // CHECK14-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END25:%.*]]
17335 // CHECK14:       omp.inner.for.body11:
17336 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17337 // CHECK14-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP17]], 1
17338 // CHECK14-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
17339 // CHECK14-NEXT:    store i32 [[ADD13]], i32* [[I]], align 4
17340 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[B]], align 4
17341 // CHECK14-NEXT:    [[CONV14:%.*]] = sitofp i32 [[TMP18]] to double
17342 // CHECK14-NEXT:    [[ADD15:%.*]] = fadd double [[CONV14]], 1.500000e+00
17343 // CHECK14-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
17344 // CHECK14-NEXT:    store double [[ADD15]], double* [[A16]], align 8
17345 // CHECK14-NEXT:    [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
17346 // CHECK14-NEXT:    [[TMP19:%.*]] = load double, double* [[A17]], align 8
17347 // CHECK14-NEXT:    [[INC18:%.*]] = fadd double [[TMP19]], 1.000000e+00
17348 // CHECK14-NEXT:    store double [[INC18]], double* [[A17]], align 8
17349 // CHECK14-NEXT:    [[CONV19:%.*]] = fptosi double [[INC18]] to i16
17350 // CHECK14-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP2]]
17351 // CHECK14-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP20]]
17352 // CHECK14-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX20]], i64 1
17353 // CHECK14-NEXT:    store i16 [[CONV19]], i16* [[ARRAYIDX21]], align 2
17354 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE22:%.*]]
17355 // CHECK14:       omp.body.continue22:
17356 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC23:%.*]]
17357 // CHECK14:       omp.inner.for.inc23:
17358 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17359 // CHECK14-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP21]], 1
17360 // CHECK14-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4
17361 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP22:![0-9]+]]
17362 // CHECK14:       omp.inner.for.end25:
17363 // CHECK14-NEXT:    br label [[OMP_IF_END]]
17364 // CHECK14:       omp_if.end:
17365 // CHECK14-NEXT:    store i32 10, i32* [[I]], align 4
17366 // CHECK14-NEXT:    [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]]
17367 // CHECK14-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP22]]
17368 // CHECK14-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i64 1
17369 // CHECK14-NEXT:    [[TMP23:%.*]] = load i16, i16* [[ARRAYIDX27]], align 2
17370 // CHECK14-NEXT:    [[CONV28:%.*]] = sext i16 [[TMP23]] to i32
17371 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[B]], align 4
17372 // CHECK14-NEXT:    [[ADD29:%.*]] = add nsw i32 [[CONV28]], [[TMP24]]
17373 // CHECK14-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
17374 // CHECK14-NEXT:    call void @llvm.stackrestore(i8* [[TMP25]])
17375 // CHECK14-NEXT:    ret i32 [[ADD29]]
17376 //
17377 //
17378 // CHECK14-LABEL: define {{[^@]+}}@_ZL7fstatici
17379 // CHECK14-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
17380 // CHECK14-NEXT:  entry:
17381 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17382 // CHECK14-NEXT:    [[A:%.*]] = alloca i32, align 4
17383 // CHECK14-NEXT:    [[AA:%.*]] = alloca i16, align 2
17384 // CHECK14-NEXT:    [[AAA:%.*]] = alloca i8, align 1
17385 // CHECK14-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
17386 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17387 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17388 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17389 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17390 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17391 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17392 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
17393 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17394 // CHECK14-NEXT:    [[I5:%.*]] = alloca i32, align 4
17395 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17396 // CHECK14-NEXT:    store i32 0, i32* [[A]], align 4
17397 // CHECK14-NEXT:    store i16 0, i16* [[AA]], align 2
17398 // CHECK14-NEXT:    store i8 0, i8* [[AAA]], align 1
17399 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
17400 // CHECK14-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
17401 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
17402 // CHECK14-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17403 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17404 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17405 // CHECK14-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
17406 // CHECK14-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
17407 // CHECK14-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
17408 // CHECK14-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
17409 // CHECK14-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
17410 // CHECK14-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17411 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17412 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17413 // CHECK14-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
17414 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17415 // CHECK14-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
17416 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17417 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17418 // CHECK14-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
17419 // CHECK14-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
17420 // CHECK14:       simd.if.then:
17421 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17422 // CHECK14-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
17423 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17424 // CHECK14:       omp.inner.for.cond:
17425 // CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
17426 // CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
17427 // CHECK14-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
17428 // CHECK14-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
17429 // CHECK14-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17430 // CHECK14:       omp.inner.for.body:
17431 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !24
17432 // CHECK14-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
17433 // CHECK14-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
17434 // CHECK14-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
17435 // CHECK14-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !24
17436 // CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24
17437 // CHECK14-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
17438 // CHECK14-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !24
17439 // CHECK14-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24
17440 // CHECK14-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
17441 // CHECK14-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
17442 // CHECK14-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
17443 // CHECK14-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !24
17444 // CHECK14-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !24
17445 // CHECK14-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
17446 // CHECK14-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
17447 // CHECK14-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
17448 // CHECK14-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !24
17449 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
17450 // CHECK14-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
17451 // CHECK14-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
17452 // CHECK14-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
17453 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17454 // CHECK14:       omp.body.continue:
17455 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17456 // CHECK14:       omp.inner.for.inc:
17457 // CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
17458 // CHECK14-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
17459 // CHECK14-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
17460 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
17461 // CHECK14:       omp.inner.for.end:
17462 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17463 // CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17464 // CHECK14-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17465 // CHECK14-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
17466 // CHECK14-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
17467 // CHECK14-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
17468 // CHECK14-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
17469 // CHECK14-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
17470 // CHECK14-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
17471 // CHECK14-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
17472 // CHECK14-NEXT:    br label [[SIMD_IF_END]]
17473 // CHECK14:       simd.if.end:
17474 // CHECK14-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
17475 // CHECK14-NEXT:    ret i32 [[TMP21]]
17476 //
17477 //
17478 // CHECK14-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
17479 // CHECK14-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
17480 // CHECK14-NEXT:  entry:
17481 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17482 // CHECK14-NEXT:    [[A:%.*]] = alloca i32, align 4
17483 // CHECK14-NEXT:    [[AA:%.*]] = alloca i16, align 2
17484 // CHECK14-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
17485 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17486 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17487 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17488 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17489 // CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
17490 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17491 // CHECK14-NEXT:    store i32 0, i32* [[A]], align 4
17492 // CHECK14-NEXT:    store i16 0, i16* [[AA]], align 2
17493 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17494 // CHECK14-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
17495 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17496 // CHECK14-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
17497 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17498 // CHECK14:       omp.inner.for.cond:
17499 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
17500 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
17501 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
17502 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17503 // CHECK14:       omp.inner.for.body:
17504 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
17505 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
17506 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17507 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !27
17508 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !27
17509 // CHECK14-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
17510 // CHECK14-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !27
17511 // CHECK14-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !27
17512 // CHECK14-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
17513 // CHECK14-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
17514 // CHECK14-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
17515 // CHECK14-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !27
17516 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
17517 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
17518 // CHECK14-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
17519 // CHECK14-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
17520 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17521 // CHECK14:       omp.body.continue:
17522 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17523 // CHECK14:       omp.inner.for.inc:
17524 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
17525 // CHECK14-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
17526 // CHECK14-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
17527 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
17528 // CHECK14:       omp.inner.for.end:
17529 // CHECK14-NEXT:    store i32 10, i32* [[I]], align 4
17530 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
17531 // CHECK14-NEXT:    ret i32 [[TMP8]]
17532 //
17533 //
17534 // CHECK15-LABEL: define {{[^@]+}}@_Z3fooi
17535 // CHECK15-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
17536 // CHECK15-NEXT:  entry:
17537 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17538 // CHECK15-NEXT:    [[A:%.*]] = alloca i32, align 4
17539 // CHECK15-NEXT:    [[AA:%.*]] = alloca i16, align 2
17540 // CHECK15-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
17541 // CHECK15-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
17542 // CHECK15-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
17543 // CHECK15-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
17544 // CHECK15-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
17545 // CHECK15-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
17546 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17547 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17548 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17549 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17550 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17551 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17552 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
17553 // CHECK15-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
17554 // CHECK15-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
17555 // CHECK15-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
17556 // CHECK15-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
17557 // CHECK15-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
17558 // CHECK15-NEXT:    [[A8:%.*]] = alloca i32, align 4
17559 // CHECK15-NEXT:    [[A9:%.*]] = alloca i32, align 4
17560 // CHECK15-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
17561 // CHECK15-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
17562 // CHECK15-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
17563 // CHECK15-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
17564 // CHECK15-NEXT:    [[I24:%.*]] = alloca i32, align 4
17565 // CHECK15-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
17566 // CHECK15-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
17567 // CHECK15-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
17568 // CHECK15-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
17569 // CHECK15-NEXT:    [[I40:%.*]] = alloca i32, align 4
17570 // CHECK15-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
17571 // CHECK15-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
17572 // CHECK15-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
17573 // CHECK15-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
17574 // CHECK15-NEXT:    [[I58:%.*]] = alloca i32, align 4
17575 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17576 // CHECK15-NEXT:    store i32 0, i32* [[A]], align 4
17577 // CHECK15-NEXT:    store i16 0, i16* [[AA]], align 2
17578 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
17579 // CHECK15-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
17580 // CHECK15-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
17581 // CHECK15-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
17582 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
17583 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17584 // CHECK15-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
17585 // CHECK15-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
17586 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
17587 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4
17588 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
17589 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
17590 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17591 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17592 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
17593 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17594 // CHECK15-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
17595 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17596 // CHECK15:       omp.inner.for.cond:
17597 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
17598 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
17599 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
17600 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17601 // CHECK15:       omp.inner.for.body:
17602 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
17603 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
17604 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17605 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
17606 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17607 // CHECK15:       omp.body.continue:
17608 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17609 // CHECK15:       omp.inner.for.inc:
17610 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
17611 // CHECK15-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
17612 // CHECK15-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
17613 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
17614 // CHECK15:       omp.inner.for.end:
17615 // CHECK15-NEXT:    store i32 10, i32* [[I]], align 4
17616 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
17617 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
17618 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
17619 // CHECK15-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV7]], align 4
17620 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[A]], align 4
17621 // CHECK15-NEXT:    store i32 [[TMP12]], i32* [[DOTLINEAR_START]], align 4
17622 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
17623 // CHECK15:       omp.inner.for.cond10:
17624 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
17625 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
17626 // CHECK15-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
17627 // CHECK15-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
17628 // CHECK15:       omp.inner.for.body12:
17629 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
17630 // CHECK15-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 1
17631 // CHECK15-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
17632 // CHECK15-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4, !nontemporal !8
17633 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A8]], align 4, !nontemporal !8
17634 // CHECK15-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
17635 // CHECK15-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4, !nontemporal !8
17636 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
17637 // CHECK15:       omp.body.continue16:
17638 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
17639 // CHECK15:       omp.inner.for.inc17:
17640 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
17641 // CHECK15-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
17642 // CHECK15-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
17643 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP9:![0-9]+]]
17644 // CHECK15:       omp.inner.for.end19:
17645 // CHECK15-NEXT:    store i32 10, i32* [[A]], align 4
17646 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
17647 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
17648 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
17649 // CHECK15-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV23]], align 4
17650 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
17651 // CHECK15:       omp.inner.for.cond25:
17652 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
17653 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !11
17654 // CHECK15-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
17655 // CHECK15-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
17656 // CHECK15:       omp.inner.for.body27:
17657 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
17658 // CHECK15-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP21]], 1
17659 // CHECK15-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
17660 // CHECK15-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !11
17661 // CHECK15-NEXT:    [[TMP22:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !11
17662 // CHECK15-NEXT:    [[CONV:%.*]] = sext i16 [[TMP22]] to i32
17663 // CHECK15-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
17664 // CHECK15-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
17665 // CHECK15-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !11
17666 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
17667 // CHECK15:       omp.body.continue32:
17668 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
17669 // CHECK15:       omp.inner.for.inc33:
17670 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
17671 // CHECK15-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP23]], 1
17672 // CHECK15-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
17673 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP12:![0-9]+]]
17674 // CHECK15:       omp.inner.for.end35:
17675 // CHECK15-NEXT:    store i32 10, i32* [[I24]], align 4
17676 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
17677 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
17678 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
17679 // CHECK15-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV39]], align 4
17680 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
17681 // CHECK15:       omp.inner.for.cond41:
17682 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
17683 // CHECK15-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !14
17684 // CHECK15-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
17685 // CHECK15-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
17686 // CHECK15:       omp.inner.for.body43:
17687 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
17688 // CHECK15-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP27]], 1
17689 // CHECK15-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
17690 // CHECK15-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !14
17691 // CHECK15-NEXT:    [[TMP28:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !14
17692 // CHECK15-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP28]], 1
17693 // CHECK15-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !14
17694 // CHECK15-NEXT:    [[TMP29:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !14
17695 // CHECK15-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP29]] to i32
17696 // CHECK15-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
17697 // CHECK15-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
17698 // CHECK15-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !14
17699 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
17700 // CHECK15:       omp.body.continue50:
17701 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
17702 // CHECK15:       omp.inner.for.inc51:
17703 // CHECK15-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
17704 // CHECK15-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP30]], 1
17705 // CHECK15-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
17706 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP15:![0-9]+]]
17707 // CHECK15:       omp.inner.for.end53:
17708 // CHECK15-NEXT:    store i32 10, i32* [[I40]], align 4
17709 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
17710 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
17711 // CHECK15-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
17712 // CHECK15-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV57]], align 4
17713 // CHECK15-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 0
17714 // CHECK15-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
17715 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
17716 // CHECK15:       omp.inner.for.cond59:
17717 // CHECK15-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
17718 // CHECK15-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !17
17719 // CHECK15-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP32]], [[TMP33]]
17720 // CHECK15-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
17721 // CHECK15:       omp.inner.for.body61:
17722 // CHECK15-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
17723 // CHECK15-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP34]], 1
17724 // CHECK15-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
17725 // CHECK15-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !17
17726 // CHECK15-NEXT:    [[TMP35:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !17
17727 // CHECK15-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP35]], 1
17728 // CHECK15-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !17
17729 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
17730 // CHECK15-NEXT:    [[TMP36:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !17
17731 // CHECK15-NEXT:    [[CONV65:%.*]] = fpext float [[TMP36]] to double
17732 // CHECK15-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
17733 // CHECK15-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
17734 // CHECK15-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !17
17735 // CHECK15-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
17736 // CHECK15-NEXT:    [[TMP37:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !17
17737 // CHECK15-NEXT:    [[CONV69:%.*]] = fpext float [[TMP37]] to double
17738 // CHECK15-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
17739 // CHECK15-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
17740 // CHECK15-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !17
17741 // CHECK15-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
17742 // CHECK15-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i32 0, i32 2
17743 // CHECK15-NEXT:    [[TMP38:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !17
17744 // CHECK15-NEXT:    [[ADD74:%.*]] = fadd double [[TMP38]], 1.000000e+00
17745 // CHECK15-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !17
17746 // CHECK15-NEXT:    [[TMP39:%.*]] = mul nsw i32 1, [[TMP2]]
17747 // CHECK15-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP39]]
17748 // CHECK15-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i32 3
17749 // CHECK15-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !17
17750 // CHECK15-NEXT:    [[ADD77:%.*]] = fadd double [[TMP40]], 1.000000e+00
17751 // CHECK15-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !17
17752 // CHECK15-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
17753 // CHECK15-NEXT:    [[TMP41:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !17
17754 // CHECK15-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP41]], 1
17755 // CHECK15-NEXT:    store i64 [[ADD78]], i64* [[X]], align 4, !llvm.access.group !17
17756 // CHECK15-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
17757 // CHECK15-NEXT:    [[TMP42:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !17
17758 // CHECK15-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP42]] to i32
17759 // CHECK15-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
17760 // CHECK15-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
17761 // CHECK15-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 4, !llvm.access.group !17
17762 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
17763 // CHECK15:       omp.body.continue82:
17764 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
17765 // CHECK15:       omp.inner.for.inc83:
17766 // CHECK15-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
17767 // CHECK15-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP43]], 1
17768 // CHECK15-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
17769 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP18:![0-9]+]]
17770 // CHECK15:       omp.inner.for.end85:
17771 // CHECK15-NEXT:    store i32 10, i32* [[I58]], align 4
17772 // CHECK15-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
17773 // CHECK15-NEXT:    [[TMP45:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
17774 // CHECK15-NEXT:    call void @llvm.stackrestore(i8* [[TMP45]])
17775 // CHECK15-NEXT:    ret i32 [[TMP44]]
17776 //
17777 //
17778 // CHECK15-LABEL: define {{[^@]+}}@_Z3bari
17779 // CHECK15-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
17780 // CHECK15-NEXT:  entry:
17781 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17782 // CHECK15-NEXT:    [[A:%.*]] = alloca i32, align 4
17783 // CHECK15-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
17784 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17785 // CHECK15-NEXT:    store i32 0, i32* [[A]], align 4
17786 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
17787 // CHECK15-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
17788 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
17789 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
17790 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
17791 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
17792 // CHECK15-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef [[TMP2]])
17793 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
17794 // CHECK15-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
17795 // CHECK15-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
17796 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
17797 // CHECK15-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
17798 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
17799 // CHECK15-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
17800 // CHECK15-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
17801 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
17802 // CHECK15-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
17803 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
17804 // CHECK15-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
17805 // CHECK15-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
17806 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
17807 // CHECK15-NEXT:    ret i32 [[TMP8]]
17808 //
17809 //
17810 // CHECK15-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
17811 // CHECK15-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
17812 // CHECK15-NEXT:  entry:
17813 // CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
17814 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17815 // CHECK15-NEXT:    [[B:%.*]] = alloca i32, align 4
17816 // CHECK15-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
17817 // CHECK15-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
17818 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
17819 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17820 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17821 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17822 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17823 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
17824 // CHECK15-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
17825 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17826 // CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
17827 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
17828 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
17829 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
17830 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
17831 // CHECK15-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
17832 // CHECK15-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
17833 // CHECK15-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
17834 // CHECK15-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
17835 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
17836 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
17837 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60
17838 // CHECK15-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
17839 // CHECK15-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
17840 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17841 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
17842 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17843 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
17844 // CHECK15-NEXT:    [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
17845 // CHECK15-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
17846 // CHECK15-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
17847 // CHECK15:       omp_if.then:
17848 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17849 // CHECK15:       omp.inner.for.cond:
17850 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
17851 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20
17852 // CHECK15-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
17853 // CHECK15-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17854 // CHECK15:       omp.inner.for.body:
17855 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
17856 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
17857 // CHECK15-NEXT:    [[ADD3:%.*]] = add nsw i32 0, [[MUL]]
17858 // CHECK15-NEXT:    store i32 [[ADD3]], i32* [[I]], align 4, !llvm.access.group !20
17859 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !20
17860 // CHECK15-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP10]] to double
17861 // CHECK15-NEXT:    [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00
17862 // CHECK15-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
17863 // CHECK15-NEXT:    store double [[ADD4]], double* [[A]], align 4, !llvm.access.group !20
17864 // CHECK15-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
17865 // CHECK15-NEXT:    [[TMP11:%.*]] = load double, double* [[A5]], align 4, !llvm.access.group !20
17866 // CHECK15-NEXT:    [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00
17867 // CHECK15-NEXT:    store double [[INC]], double* [[A5]], align 4, !llvm.access.group !20
17868 // CHECK15-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
17869 // CHECK15-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
17870 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
17871 // CHECK15-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
17872 // CHECK15-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !20
17873 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17874 // CHECK15:       omp.body.continue:
17875 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17876 // CHECK15:       omp.inner.for.inc:
17877 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
17878 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1
17879 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
17880 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
17881 // CHECK15:       omp.inner.for.end:
17882 // CHECK15-NEXT:    br label [[OMP_IF_END:%.*]]
17883 // CHECK15:       omp_if.else:
17884 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
17885 // CHECK15:       omp.inner.for.cond9:
17886 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17887 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17888 // CHECK15-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
17889 // CHECK15-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END25:%.*]]
17890 // CHECK15:       omp.inner.for.body11:
17891 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17892 // CHECK15-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP16]], 1
17893 // CHECK15-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
17894 // CHECK15-NEXT:    store i32 [[ADD13]], i32* [[I]], align 4
17895 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[B]], align 4
17896 // CHECK15-NEXT:    [[CONV14:%.*]] = sitofp i32 [[TMP17]] to double
17897 // CHECK15-NEXT:    [[ADD15:%.*]] = fadd double [[CONV14]], 1.500000e+00
17898 // CHECK15-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
17899 // CHECK15-NEXT:    store double [[ADD15]], double* [[A16]], align 4
17900 // CHECK15-NEXT:    [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
17901 // CHECK15-NEXT:    [[TMP18:%.*]] = load double, double* [[A17]], align 4
17902 // CHECK15-NEXT:    [[INC18:%.*]] = fadd double [[TMP18]], 1.000000e+00
17903 // CHECK15-NEXT:    store double [[INC18]], double* [[A17]], align 4
17904 // CHECK15-NEXT:    [[CONV19:%.*]] = fptosi double [[INC18]] to i16
17905 // CHECK15-NEXT:    [[TMP19:%.*]] = mul nsw i32 1, [[TMP1]]
17906 // CHECK15-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP19]]
17907 // CHECK15-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX20]], i32 1
17908 // CHECK15-NEXT:    store i16 [[CONV19]], i16* [[ARRAYIDX21]], align 2
17909 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE22:%.*]]
17910 // CHECK15:       omp.body.continue22:
17911 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC23:%.*]]
17912 // CHECK15:       omp.inner.for.inc23:
17913 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17914 // CHECK15-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP20]], 1
17915 // CHECK15-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4
17916 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP23:![0-9]+]]
17917 // CHECK15:       omp.inner.for.end25:
17918 // CHECK15-NEXT:    br label [[OMP_IF_END]]
17919 // CHECK15:       omp_if.end:
17920 // CHECK15-NEXT:    store i32 10, i32* [[I]], align 4
17921 // CHECK15-NEXT:    [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]]
17922 // CHECK15-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP21]]
17923 // CHECK15-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i32 1
17924 // CHECK15-NEXT:    [[TMP22:%.*]] = load i16, i16* [[ARRAYIDX27]], align 2
17925 // CHECK15-NEXT:    [[CONV28:%.*]] = sext i16 [[TMP22]] to i32
17926 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[B]], align 4
17927 // CHECK15-NEXT:    [[ADD29:%.*]] = add nsw i32 [[CONV28]], [[TMP23]]
17928 // CHECK15-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
17929 // CHECK15-NEXT:    call void @llvm.stackrestore(i8* [[TMP24]])
17930 // CHECK15-NEXT:    ret i32 [[ADD29]]
17931 //
17932 //
17933 // CHECK15-LABEL: define {{[^@]+}}@_ZL7fstatici
17934 // CHECK15-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
17935 // CHECK15-NEXT:  entry:
17936 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
17937 // CHECK15-NEXT:    [[A:%.*]] = alloca i32, align 4
17938 // CHECK15-NEXT:    [[AA:%.*]] = alloca i16, align 2
17939 // CHECK15-NEXT:    [[AAA:%.*]] = alloca i8, align 1
17940 // CHECK15-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
17941 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17942 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
17943 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
17944 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
17945 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17946 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17947 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
17948 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17949 // CHECK15-NEXT:    [[I5:%.*]] = alloca i32, align 4
17950 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
17951 // CHECK15-NEXT:    store i32 0, i32* [[A]], align 4
17952 // CHECK15-NEXT:    store i16 0, i16* [[AA]], align 2
17953 // CHECK15-NEXT:    store i8 0, i8* [[AAA]], align 1
17954 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
17955 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
17956 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
17957 // CHECK15-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
17958 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17959 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17960 // CHECK15-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
17961 // CHECK15-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
17962 // CHECK15-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
17963 // CHECK15-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
17964 // CHECK15-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
17965 // CHECK15-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
17966 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17967 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
17968 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
17969 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17970 // CHECK15-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
17971 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
17972 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
17973 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
17974 // CHECK15-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
17975 // CHECK15:       simd.if.then:
17976 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17977 // CHECK15-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
17978 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17979 // CHECK15:       omp.inner.for.cond:
17980 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
17981 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
17982 // CHECK15-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
17983 // CHECK15-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
17984 // CHECK15-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17985 // CHECK15:       omp.inner.for.body:
17986 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !25
17987 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
17988 // CHECK15-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
17989 // CHECK15-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
17990 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !25
17991 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25
17992 // CHECK15-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
17993 // CHECK15-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !25
17994 // CHECK15-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25
17995 // CHECK15-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
17996 // CHECK15-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
17997 // CHECK15-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
17998 // CHECK15-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !25
17999 // CHECK15-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !25
18000 // CHECK15-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
18001 // CHECK15-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
18002 // CHECK15-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
18003 // CHECK15-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !25
18004 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
18005 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
18006 // CHECK15-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
18007 // CHECK15-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
18008 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18009 // CHECK15:       omp.body.continue:
18010 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18011 // CHECK15:       omp.inner.for.inc:
18012 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
18013 // CHECK15-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
18014 // CHECK15-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
18015 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
18016 // CHECK15:       omp.inner.for.end:
18017 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18018 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18019 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18020 // CHECK15-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
18021 // CHECK15-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
18022 // CHECK15-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
18023 // CHECK15-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
18024 // CHECK15-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
18025 // CHECK15-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
18026 // CHECK15-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
18027 // CHECK15-NEXT:    br label [[SIMD_IF_END]]
18028 // CHECK15:       simd.if.end:
18029 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
18030 // CHECK15-NEXT:    ret i32 [[TMP21]]
18031 //
18032 //
18033 // CHECK15-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
18034 // CHECK15-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
18035 // CHECK15-NEXT:  entry:
18036 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18037 // CHECK15-NEXT:    [[A:%.*]] = alloca i32, align 4
18038 // CHECK15-NEXT:    [[AA:%.*]] = alloca i16, align 2
18039 // CHECK15-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
18040 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18041 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18042 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18043 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18044 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
18045 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18046 // CHECK15-NEXT:    store i32 0, i32* [[A]], align 4
18047 // CHECK15-NEXT:    store i16 0, i16* [[AA]], align 2
18048 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18049 // CHECK15-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
18050 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18051 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
18052 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18053 // CHECK15:       omp.inner.for.cond:
18054 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
18055 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28
18056 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
18057 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18058 // CHECK15:       omp.inner.for.body:
18059 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
18060 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
18061 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18062 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !28
18063 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !28
18064 // CHECK15-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
18065 // CHECK15-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !28
18066 // CHECK15-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !28
18067 // CHECK15-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
18068 // CHECK15-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
18069 // CHECK15-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
18070 // CHECK15-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !28
18071 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
18072 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
18073 // CHECK15-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
18074 // CHECK15-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
18075 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18076 // CHECK15:       omp.body.continue:
18077 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18078 // CHECK15:       omp.inner.for.inc:
18079 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
18080 // CHECK15-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
18081 // CHECK15-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
18082 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
18083 // CHECK15:       omp.inner.for.end:
18084 // CHECK15-NEXT:    store i32 10, i32* [[I]], align 4
18085 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
18086 // CHECK15-NEXT:    ret i32 [[TMP8]]
18087 //
18088 //
18089 // CHECK16-LABEL: define {{[^@]+}}@_Z3fooi
18090 // CHECK16-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
18091 // CHECK16-NEXT:  entry:
18092 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18093 // CHECK16-NEXT:    [[A:%.*]] = alloca i32, align 4
18094 // CHECK16-NEXT:    [[AA:%.*]] = alloca i16, align 2
18095 // CHECK16-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
18096 // CHECK16-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
18097 // CHECK16-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
18098 // CHECK16-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
18099 // CHECK16-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
18100 // CHECK16-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
18101 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18102 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
18103 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18104 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18105 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18106 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18107 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18108 // CHECK16-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
18109 // CHECK16-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
18110 // CHECK16-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
18111 // CHECK16-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
18112 // CHECK16-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
18113 // CHECK16-NEXT:    [[A8:%.*]] = alloca i32, align 4
18114 // CHECK16-NEXT:    [[A9:%.*]] = alloca i32, align 4
18115 // CHECK16-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
18116 // CHECK16-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
18117 // CHECK16-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
18118 // CHECK16-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
18119 // CHECK16-NEXT:    [[I24:%.*]] = alloca i32, align 4
18120 // CHECK16-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
18121 // CHECK16-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
18122 // CHECK16-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
18123 // CHECK16-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
18124 // CHECK16-NEXT:    [[I40:%.*]] = alloca i32, align 4
18125 // CHECK16-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
18126 // CHECK16-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
18127 // CHECK16-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
18128 // CHECK16-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
18129 // CHECK16-NEXT:    [[I58:%.*]] = alloca i32, align 4
18130 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18131 // CHECK16-NEXT:    store i32 0, i32* [[A]], align 4
18132 // CHECK16-NEXT:    store i16 0, i16* [[AA]], align 2
18133 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
18134 // CHECK16-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
18135 // CHECK16-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
18136 // CHECK16-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
18137 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
18138 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
18139 // CHECK16-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
18140 // CHECK16-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
18141 // CHECK16-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
18142 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4
18143 // CHECK16-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
18144 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
18145 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_2]], align 4
18146 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18147 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
18148 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18149 // CHECK16-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
18150 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18151 // CHECK16:       omp.inner.for.cond:
18152 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
18153 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
18154 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
18155 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18156 // CHECK16:       omp.inner.for.body:
18157 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
18158 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
18159 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18160 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
18161 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18162 // CHECK16:       omp.body.continue:
18163 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18164 // CHECK16:       omp.inner.for.inc:
18165 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
18166 // CHECK16-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
18167 // CHECK16-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
18168 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
18169 // CHECK16:       omp.inner.for.end:
18170 // CHECK16-NEXT:    store i32 10, i32* [[I]], align 4
18171 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
18172 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
18173 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
18174 // CHECK16-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV7]], align 4
18175 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[A]], align 4
18176 // CHECK16-NEXT:    store i32 [[TMP12]], i32* [[DOTLINEAR_START]], align 4
18177 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
18178 // CHECK16:       omp.inner.for.cond10:
18179 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
18180 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
18181 // CHECK16-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
18182 // CHECK16-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
18183 // CHECK16:       omp.inner.for.body12:
18184 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
18185 // CHECK16-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 1
18186 // CHECK16-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
18187 // CHECK16-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4, !nontemporal !8
18188 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A8]], align 4, !nontemporal !8
18189 // CHECK16-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
18190 // CHECK16-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4, !nontemporal !8
18191 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
18192 // CHECK16:       omp.body.continue16:
18193 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
18194 // CHECK16:       omp.inner.for.inc17:
18195 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
18196 // CHECK16-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
18197 // CHECK16-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
18198 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP9:![0-9]+]]
18199 // CHECK16:       omp.inner.for.end19:
18200 // CHECK16-NEXT:    store i32 10, i32* [[A]], align 4
18201 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
18202 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
18203 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
18204 // CHECK16-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV23]], align 4
18205 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
18206 // CHECK16:       omp.inner.for.cond25:
18207 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
18208 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !11
18209 // CHECK16-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
18210 // CHECK16-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
18211 // CHECK16:       omp.inner.for.body27:
18212 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
18213 // CHECK16-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP21]], 1
18214 // CHECK16-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
18215 // CHECK16-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !11
18216 // CHECK16-NEXT:    [[TMP22:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !11
18217 // CHECK16-NEXT:    [[CONV:%.*]] = sext i16 [[TMP22]] to i32
18218 // CHECK16-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
18219 // CHECK16-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
18220 // CHECK16-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !11
18221 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
18222 // CHECK16:       omp.body.continue32:
18223 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
18224 // CHECK16:       omp.inner.for.inc33:
18225 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
18226 // CHECK16-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP23]], 1
18227 // CHECK16-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
18228 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP12:![0-9]+]]
18229 // CHECK16:       omp.inner.for.end35:
18230 // CHECK16-NEXT:    store i32 10, i32* [[I24]], align 4
18231 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
18232 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
18233 // CHECK16-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
18234 // CHECK16-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV39]], align 4
18235 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
18236 // CHECK16:       omp.inner.for.cond41:
18237 // CHECK16-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
18238 // CHECK16-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !14
18239 // CHECK16-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
18240 // CHECK16-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
18241 // CHECK16:       omp.inner.for.body43:
18242 // CHECK16-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
18243 // CHECK16-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP27]], 1
18244 // CHECK16-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
18245 // CHECK16-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !14
18246 // CHECK16-NEXT:    [[TMP28:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !14
18247 // CHECK16-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP28]], 1
18248 // CHECK16-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !14
18249 // CHECK16-NEXT:    [[TMP29:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !14
18250 // CHECK16-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP29]] to i32
18251 // CHECK16-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
18252 // CHECK16-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
18253 // CHECK16-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !14
18254 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
18255 // CHECK16:       omp.body.continue50:
18256 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
18257 // CHECK16:       omp.inner.for.inc51:
18258 // CHECK16-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
18259 // CHECK16-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP30]], 1
18260 // CHECK16-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
18261 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP15:![0-9]+]]
18262 // CHECK16:       omp.inner.for.end53:
18263 // CHECK16-NEXT:    store i32 10, i32* [[I40]], align 4
18264 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
18265 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
18266 // CHECK16-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
18267 // CHECK16-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV57]], align 4
18268 // CHECK16-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 0
18269 // CHECK16-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
18270 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
18271 // CHECK16:       omp.inner.for.cond59:
18272 // CHECK16-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
18273 // CHECK16-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !17
18274 // CHECK16-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP32]], [[TMP33]]
18275 // CHECK16-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
18276 // CHECK16:       omp.inner.for.body61:
18277 // CHECK16-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
18278 // CHECK16-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP34]], 1
18279 // CHECK16-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
18280 // CHECK16-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !17
18281 // CHECK16-NEXT:    [[TMP35:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !17
18282 // CHECK16-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP35]], 1
18283 // CHECK16-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !17
18284 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
18285 // CHECK16-NEXT:    [[TMP36:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !17
18286 // CHECK16-NEXT:    [[CONV65:%.*]] = fpext float [[TMP36]] to double
18287 // CHECK16-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
18288 // CHECK16-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
18289 // CHECK16-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !17
18290 // CHECK16-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
18291 // CHECK16-NEXT:    [[TMP37:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !17
18292 // CHECK16-NEXT:    [[CONV69:%.*]] = fpext float [[TMP37]] to double
18293 // CHECK16-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
18294 // CHECK16-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
18295 // CHECK16-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !17
18296 // CHECK16-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
18297 // CHECK16-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i32 0, i32 2
18298 // CHECK16-NEXT:    [[TMP38:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !17
18299 // CHECK16-NEXT:    [[ADD74:%.*]] = fadd double [[TMP38]], 1.000000e+00
18300 // CHECK16-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !17
18301 // CHECK16-NEXT:    [[TMP39:%.*]] = mul nsw i32 1, [[TMP2]]
18302 // CHECK16-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP39]]
18303 // CHECK16-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i32 3
18304 // CHECK16-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !17
18305 // CHECK16-NEXT:    [[ADD77:%.*]] = fadd double [[TMP40]], 1.000000e+00
18306 // CHECK16-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !17
18307 // CHECK16-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
18308 // CHECK16-NEXT:    [[TMP41:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !17
18309 // CHECK16-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP41]], 1
18310 // CHECK16-NEXT:    store i64 [[ADD78]], i64* [[X]], align 4, !llvm.access.group !17
18311 // CHECK16-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
18312 // CHECK16-NEXT:    [[TMP42:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !17
18313 // CHECK16-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP42]] to i32
18314 // CHECK16-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
18315 // CHECK16-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
18316 // CHECK16-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 4, !llvm.access.group !17
18317 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
18318 // CHECK16:       omp.body.continue82:
18319 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
18320 // CHECK16:       omp.inner.for.inc83:
18321 // CHECK16-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
18322 // CHECK16-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP43]], 1
18323 // CHECK16-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
18324 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP18:![0-9]+]]
18325 // CHECK16:       omp.inner.for.end85:
18326 // CHECK16-NEXT:    store i32 10, i32* [[I58]], align 4
18327 // CHECK16-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
18328 // CHECK16-NEXT:    [[TMP45:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
18329 // CHECK16-NEXT:    call void @llvm.stackrestore(i8* [[TMP45]])
18330 // CHECK16-NEXT:    ret i32 [[TMP44]]
18331 //
18332 //
18333 // CHECK16-LABEL: define {{[^@]+}}@_Z3bari
18334 // CHECK16-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
18335 // CHECK16-NEXT:  entry:
18336 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18337 // CHECK16-NEXT:    [[A:%.*]] = alloca i32, align 4
18338 // CHECK16-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
18339 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18340 // CHECK16-NEXT:    store i32 0, i32* [[A]], align 4
18341 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
18342 // CHECK16-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
18343 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
18344 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
18345 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
18346 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
18347 // CHECK16-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef [[TMP2]])
18348 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
18349 // CHECK16-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
18350 // CHECK16-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
18351 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
18352 // CHECK16-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
18353 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
18354 // CHECK16-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
18355 // CHECK16-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
18356 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
18357 // CHECK16-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
18358 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
18359 // CHECK16-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
18360 // CHECK16-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
18361 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
18362 // CHECK16-NEXT:    ret i32 [[TMP8]]
18363 //
18364 //
18365 // CHECK16-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
18366 // CHECK16-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
18367 // CHECK16-NEXT:  entry:
18368 // CHECK16-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
18369 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18370 // CHECK16-NEXT:    [[B:%.*]] = alloca i32, align 4
18371 // CHECK16-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
18372 // CHECK16-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
18373 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
18374 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18375 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18376 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18377 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18378 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18379 // CHECK16-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
18380 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18381 // CHECK16-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
18382 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
18383 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
18384 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
18385 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
18386 // CHECK16-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
18387 // CHECK16-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
18388 // CHECK16-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
18389 // CHECK16-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
18390 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
18391 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
18392 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60
18393 // CHECK16-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
18394 // CHECK16-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
18395 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18396 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
18397 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18398 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
18399 // CHECK16-NEXT:    [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
18400 // CHECK16-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
18401 // CHECK16-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
18402 // CHECK16:       omp_if.then:
18403 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18404 // CHECK16:       omp.inner.for.cond:
18405 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
18406 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20
18407 // CHECK16-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
18408 // CHECK16-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18409 // CHECK16:       omp.inner.for.body:
18410 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
18411 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
18412 // CHECK16-NEXT:    [[ADD3:%.*]] = add nsw i32 0, [[MUL]]
18413 // CHECK16-NEXT:    store i32 [[ADD3]], i32* [[I]], align 4, !llvm.access.group !20
18414 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !20
18415 // CHECK16-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP10]] to double
18416 // CHECK16-NEXT:    [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00
18417 // CHECK16-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
18418 // CHECK16-NEXT:    store double [[ADD4]], double* [[A]], align 4, !llvm.access.group !20
18419 // CHECK16-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
18420 // CHECK16-NEXT:    [[TMP11:%.*]] = load double, double* [[A5]], align 4, !llvm.access.group !20
18421 // CHECK16-NEXT:    [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00
18422 // CHECK16-NEXT:    store double [[INC]], double* [[A5]], align 4, !llvm.access.group !20
18423 // CHECK16-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
18424 // CHECK16-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
18425 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
18426 // CHECK16-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
18427 // CHECK16-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !20
18428 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18429 // CHECK16:       omp.body.continue:
18430 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18431 // CHECK16:       omp.inner.for.inc:
18432 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
18433 // CHECK16-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1
18434 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
18435 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
18436 // CHECK16:       omp.inner.for.end:
18437 // CHECK16-NEXT:    br label [[OMP_IF_END:%.*]]
18438 // CHECK16:       omp_if.else:
18439 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
18440 // CHECK16:       omp.inner.for.cond9:
18441 // CHECK16-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18442 // CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18443 // CHECK16-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
18444 // CHECK16-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END25:%.*]]
18445 // CHECK16:       omp.inner.for.body11:
18446 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18447 // CHECK16-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP16]], 1
18448 // CHECK16-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
18449 // CHECK16-NEXT:    store i32 [[ADD13]], i32* [[I]], align 4
18450 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[B]], align 4
18451 // CHECK16-NEXT:    [[CONV14:%.*]] = sitofp i32 [[TMP17]] to double
18452 // CHECK16-NEXT:    [[ADD15:%.*]] = fadd double [[CONV14]], 1.500000e+00
18453 // CHECK16-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
18454 // CHECK16-NEXT:    store double [[ADD15]], double* [[A16]], align 4
18455 // CHECK16-NEXT:    [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
18456 // CHECK16-NEXT:    [[TMP18:%.*]] = load double, double* [[A17]], align 4
18457 // CHECK16-NEXT:    [[INC18:%.*]] = fadd double [[TMP18]], 1.000000e+00
18458 // CHECK16-NEXT:    store double [[INC18]], double* [[A17]], align 4
18459 // CHECK16-NEXT:    [[CONV19:%.*]] = fptosi double [[INC18]] to i16
18460 // CHECK16-NEXT:    [[TMP19:%.*]] = mul nsw i32 1, [[TMP1]]
18461 // CHECK16-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP19]]
18462 // CHECK16-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX20]], i32 1
18463 // CHECK16-NEXT:    store i16 [[CONV19]], i16* [[ARRAYIDX21]], align 2
18464 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE22:%.*]]
18465 // CHECK16:       omp.body.continue22:
18466 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC23:%.*]]
18467 // CHECK16:       omp.inner.for.inc23:
18468 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18469 // CHECK16-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP20]], 1
18470 // CHECK16-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4
18471 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP23:![0-9]+]]
18472 // CHECK16:       omp.inner.for.end25:
18473 // CHECK16-NEXT:    br label [[OMP_IF_END]]
18474 // CHECK16:       omp_if.end:
18475 // CHECK16-NEXT:    store i32 10, i32* [[I]], align 4
18476 // CHECK16-NEXT:    [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]]
18477 // CHECK16-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP21]]
18478 // CHECK16-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i32 1
18479 // CHECK16-NEXT:    [[TMP22:%.*]] = load i16, i16* [[ARRAYIDX27]], align 2
18480 // CHECK16-NEXT:    [[CONV28:%.*]] = sext i16 [[TMP22]] to i32
18481 // CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[B]], align 4
18482 // CHECK16-NEXT:    [[ADD29:%.*]] = add nsw i32 [[CONV28]], [[TMP23]]
18483 // CHECK16-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
18484 // CHECK16-NEXT:    call void @llvm.stackrestore(i8* [[TMP24]])
18485 // CHECK16-NEXT:    ret i32 [[ADD29]]
18486 //
18487 //
18488 // CHECK16-LABEL: define {{[^@]+}}@_ZL7fstatici
18489 // CHECK16-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
18490 // CHECK16-NEXT:  entry:
18491 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18492 // CHECK16-NEXT:    [[A:%.*]] = alloca i32, align 4
18493 // CHECK16-NEXT:    [[AA:%.*]] = alloca i16, align 2
18494 // CHECK16-NEXT:    [[AAA:%.*]] = alloca i8, align 1
18495 // CHECK16-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
18496 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18497 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
18498 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
18499 // CHECK16-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
18500 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18501 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18502 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18503 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18504 // CHECK16-NEXT:    [[I5:%.*]] = alloca i32, align 4
18505 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18506 // CHECK16-NEXT:    store i32 0, i32* [[A]], align 4
18507 // CHECK16-NEXT:    store i16 0, i16* [[AA]], align 2
18508 // CHECK16-NEXT:    store i8 0, i8* [[AAA]], align 1
18509 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
18510 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
18511 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
18512 // CHECK16-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
18513 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18514 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18515 // CHECK16-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
18516 // CHECK16-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
18517 // CHECK16-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
18518 // CHECK16-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
18519 // CHECK16-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
18520 // CHECK16-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
18521 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18522 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
18523 // CHECK16-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
18524 // CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18525 // CHECK16-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
18526 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18527 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18528 // CHECK16-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
18529 // CHECK16-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
18530 // CHECK16:       simd.if.then:
18531 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18532 // CHECK16-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
18533 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18534 // CHECK16:       omp.inner.for.cond:
18535 // CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
18536 // CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
18537 // CHECK16-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
18538 // CHECK16-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
18539 // CHECK16-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18540 // CHECK16:       omp.inner.for.body:
18541 // CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !25
18542 // CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
18543 // CHECK16-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
18544 // CHECK16-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
18545 // CHECK16-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !25
18546 // CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25
18547 // CHECK16-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
18548 // CHECK16-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !25
18549 // CHECK16-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25
18550 // CHECK16-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
18551 // CHECK16-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
18552 // CHECK16-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
18553 // CHECK16-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !25
18554 // CHECK16-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !25
18555 // CHECK16-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
18556 // CHECK16-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
18557 // CHECK16-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
18558 // CHECK16-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !25
18559 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
18560 // CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
18561 // CHECK16-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
18562 // CHECK16-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
18563 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18564 // CHECK16:       omp.body.continue:
18565 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18566 // CHECK16:       omp.inner.for.inc:
18567 // CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
18568 // CHECK16-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
18569 // CHECK16-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
18570 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
18571 // CHECK16:       omp.inner.for.end:
18572 // CHECK16-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18573 // CHECK16-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
18574 // CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
18575 // CHECK16-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
18576 // CHECK16-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
18577 // CHECK16-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
18578 // CHECK16-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
18579 // CHECK16-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
18580 // CHECK16-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
18581 // CHECK16-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
18582 // CHECK16-NEXT:    br label [[SIMD_IF_END]]
18583 // CHECK16:       simd.if.end:
18584 // CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
18585 // CHECK16-NEXT:    ret i32 [[TMP21]]
18586 //
18587 //
18588 // CHECK16-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
18589 // CHECK16-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
18590 // CHECK16-NEXT:  entry:
18591 // CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
18592 // CHECK16-NEXT:    [[A:%.*]] = alloca i32, align 4
18593 // CHECK16-NEXT:    [[AA:%.*]] = alloca i16, align 2
18594 // CHECK16-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
18595 // CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18596 // CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18597 // CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18598 // CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18599 // CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
18600 // CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
18601 // CHECK16-NEXT:    store i32 0, i32* [[A]], align 4
18602 // CHECK16-NEXT:    store i16 0, i16* [[AA]], align 2
18603 // CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18604 // CHECK16-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
18605 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18606 // CHECK16-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
18607 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18608 // CHECK16:       omp.inner.for.cond:
18609 // CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
18610 // CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28
18611 // CHECK16-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
18612 // CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18613 // CHECK16:       omp.inner.for.body:
18614 // CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
18615 // CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
18616 // CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18617 // CHECK16-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !28
18618 // CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !28
18619 // CHECK16-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
18620 // CHECK16-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !28
18621 // CHECK16-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !28
18622 // CHECK16-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
18623 // CHECK16-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
18624 // CHECK16-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
18625 // CHECK16-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !28
18626 // CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
18627 // CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
18628 // CHECK16-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
18629 // CHECK16-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
18630 // CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18631 // CHECK16:       omp.body.continue:
18632 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18633 // CHECK16:       omp.inner.for.inc:
18634 // CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
18635 // CHECK16-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
18636 // CHECK16-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
18637 // CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
18638 // CHECK16:       omp.inner.for.end:
18639 // CHECK16-NEXT:    store i32 10, i32* [[I]], align 4
18640 // CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
18641 // CHECK16-NEXT:    ret i32 [[TMP8]]
18642 //
18643 //
18644 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
18645 // CHECK17-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] {
18646 // CHECK17-NEXT:  entry:
18647 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
18648 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
18649 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8
18650 // CHECK17-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
18651 // CHECK17-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
18652 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
18653 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
18654 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8
18655 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
18656 // CHECK17-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
18657 // CHECK17-NEXT:    [[CONV4:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32*
18658 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV3]], align 4
18659 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV4]], align 4
18660 // CHECK17-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
18661 // CHECK17-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
18662 // CHECK17-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
18663 // CHECK17-NEXT:    store i16 [[TMP3]], i16* [[CONV5]], align 2
18664 // CHECK17-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
18665 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP4]])
18666 // CHECK17-NEXT:    ret void
18667 //
18668 //
18669 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined.
18670 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
18671 // CHECK17-NEXT:  entry:
18672 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
18673 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
18674 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
18675 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18676 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18677 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18678 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18679 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18680 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18681 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
18682 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
18683 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
18684 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
18685 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
18686 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18687 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
18688 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18689 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18690 // CHECK17-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18691 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
18692 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18693 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18694 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
18695 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18696 // CHECK17:       cond.true:
18697 // CHECK17-NEXT:    br label [[COND_END:%.*]]
18698 // CHECK17:       cond.false:
18699 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18700 // CHECK17-NEXT:    br label [[COND_END]]
18701 // CHECK17:       cond.end:
18702 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
18703 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
18704 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18705 // CHECK17-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
18706 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18707 // CHECK17:       omp.inner.for.cond:
18708 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
18709 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
18710 // CHECK17-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
18711 // CHECK17-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18712 // CHECK17:       omp.inner.for.body:
18713 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
18714 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
18715 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18716 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
18717 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18718 // CHECK17:       omp.body.continue:
18719 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18720 // CHECK17:       omp.inner.for.inc:
18721 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
18722 // CHECK17-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
18723 // CHECK17-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
18724 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
18725 // CHECK17:       omp.inner.for.end:
18726 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18727 // CHECK17:       omp.loop.exit:
18728 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
18729 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18730 // CHECK17-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
18731 // CHECK17-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18732 // CHECK17:       .omp.final.then:
18733 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
18734 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18735 // CHECK17:       .omp.final.done:
18736 // CHECK17-NEXT:    ret void
18737 //
18738 //
18739 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
18740 // CHECK17-SAME: (i64 noundef [[AA:%.*]]) #[[ATTR0]] {
18741 // CHECK17-NEXT:  entry:
18742 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
18743 // CHECK17-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
18744 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
18745 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
18746 // CHECK17-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
18747 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
18748 // CHECK17-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
18749 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
18750 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]])
18751 // CHECK17-NEXT:    ret void
18752 //
18753 //
18754 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..1
18755 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] {
18756 // CHECK17-NEXT:  entry:
18757 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
18758 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
18759 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
18760 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18761 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18762 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18763 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18764 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18765 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18766 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
18767 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
18768 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
18769 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
18770 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
18771 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18772 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
18773 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18774 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18775 // CHECK17-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18776 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
18777 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18778 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18779 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
18780 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18781 // CHECK17:       cond.true:
18782 // CHECK17-NEXT:    br label [[COND_END:%.*]]
18783 // CHECK17:       cond.false:
18784 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18785 // CHECK17-NEXT:    br label [[COND_END]]
18786 // CHECK17:       cond.end:
18787 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
18788 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
18789 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18790 // CHECK17-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
18791 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18792 // CHECK17:       omp.inner.for.cond:
18793 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
18794 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
18795 // CHECK17-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
18796 // CHECK17-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18797 // CHECK17:       omp.inner.for.body:
18798 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
18799 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
18800 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18801 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !18
18802 // CHECK17-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !18
18803 // CHECK17-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
18804 // CHECK17-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
18805 // CHECK17-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
18806 // CHECK17-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !18
18807 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18808 // CHECK17:       omp.body.continue:
18809 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18810 // CHECK17:       omp.inner.for.inc:
18811 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
18812 // CHECK17-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
18813 // CHECK17-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
18814 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
18815 // CHECK17:       omp.inner.for.end:
18816 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18817 // CHECK17:       omp.loop.exit:
18818 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
18819 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18820 // CHECK17-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
18821 // CHECK17-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18822 // CHECK17:       .omp.final.then:
18823 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
18824 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18825 // CHECK17:       .omp.final.done:
18826 // CHECK17-NEXT:    ret void
18827 //
18828 //
18829 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
18830 // CHECK17-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR0]] {
18831 // CHECK17-NEXT:  entry:
18832 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
18833 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
18834 // CHECK17-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
18835 // CHECK17-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
18836 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
18837 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
18838 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
18839 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
18840 // CHECK17-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
18841 // CHECK17-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
18842 // CHECK17-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
18843 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
18844 // CHECK17-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
18845 // CHECK17-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
18846 // CHECK17-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
18847 // CHECK17-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
18848 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
18849 // CHECK17-NEXT:    ret void
18850 //
18851 //
18852 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..2
18853 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] {
18854 // CHECK17-NEXT:  entry:
18855 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
18856 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
18857 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
18858 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
18859 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18860 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18861 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18862 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18863 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18864 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18865 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
18866 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
18867 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
18868 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
18869 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
18870 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
18871 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
18872 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18873 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
18874 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18875 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18876 // CHECK17-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18877 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
18878 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18879 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18880 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
18881 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18882 // CHECK17:       cond.true:
18883 // CHECK17-NEXT:    br label [[COND_END:%.*]]
18884 // CHECK17:       cond.false:
18885 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18886 // CHECK17-NEXT:    br label [[COND_END]]
18887 // CHECK17:       cond.end:
18888 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
18889 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
18890 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18891 // CHECK17-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
18892 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18893 // CHECK17:       omp.inner.for.cond:
18894 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
18895 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
18896 // CHECK17-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
18897 // CHECK17-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18898 // CHECK17:       omp.inner.for.body:
18899 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
18900 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
18901 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
18902 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !21
18903 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !21
18904 // CHECK17-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
18905 // CHECK17-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !21
18906 // CHECK17-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !21
18907 // CHECK17-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
18908 // CHECK17-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
18909 // CHECK17-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
18910 // CHECK17-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !21
18911 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18912 // CHECK17:       omp.body.continue:
18913 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18914 // CHECK17:       omp.inner.for.inc:
18915 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
18916 // CHECK17-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
18917 // CHECK17-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
18918 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
18919 // CHECK17:       omp.inner.for.end:
18920 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18921 // CHECK17:       omp.loop.exit:
18922 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
18923 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18924 // CHECK17-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
18925 // CHECK17-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18926 // CHECK17:       .omp.final.then:
18927 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
18928 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18929 // CHECK17:       .omp.final.done:
18930 // CHECK17-NEXT:    ret void
18931 //
18932 //
18933 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
18934 // CHECK17-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR0]] {
18935 // CHECK17-NEXT:  entry:
18936 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
18937 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
18938 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
18939 // CHECK17-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
18940 // CHECK17-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
18941 // CHECK17-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
18942 // CHECK17-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
18943 // CHECK17-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
18944 // CHECK17-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
18945 // CHECK17-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
18946 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
18947 // CHECK17-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
18948 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
18949 // CHECK17-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
18950 // CHECK17-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
18951 // CHECK17-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
18952 // CHECK17-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
18953 // CHECK17-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
18954 // CHECK17-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
18955 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
18956 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
18957 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
18958 // CHECK17-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
18959 // CHECK17-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
18960 // CHECK17-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
18961 // CHECK17-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
18962 // CHECK17-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
18963 // CHECK17-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
18964 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
18965 // CHECK17-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
18966 // CHECK17-NEXT:    store i32 [[TMP8]], i32* [[CONV5]], align 4
18967 // CHECK17-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
18968 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
18969 // CHECK17-NEXT:    ret void
18970 //
18971 //
18972 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..3
18973 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR1]] {
18974 // CHECK17-NEXT:  entry:
18975 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
18976 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
18977 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
18978 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
18979 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
18980 // CHECK17-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
18981 // CHECK17-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
18982 // CHECK17-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
18983 // CHECK17-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
18984 // CHECK17-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
18985 // CHECK17-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
18986 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18987 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18988 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18989 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18990 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18991 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18992 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
18993 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
18994 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
18995 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
18996 // CHECK17-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
18997 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
18998 // CHECK17-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
18999 // CHECK17-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
19000 // CHECK17-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
19001 // CHECK17-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
19002 // CHECK17-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
19003 // CHECK17-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
19004 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
19005 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
19006 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
19007 // CHECK17-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
19008 // CHECK17-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
19009 // CHECK17-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
19010 // CHECK17-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
19011 // CHECK17-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
19012 // CHECK17-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
19013 // CHECK17-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 0
19014 // CHECK17-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
19015 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19016 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
19017 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19018 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19019 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19020 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
19021 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19022 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19023 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
19024 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19025 // CHECK17:       cond.true:
19026 // CHECK17-NEXT:    br label [[COND_END:%.*]]
19027 // CHECK17:       cond.false:
19028 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19029 // CHECK17-NEXT:    br label [[COND_END]]
19030 // CHECK17:       cond.end:
19031 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
19032 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19033 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19034 // CHECK17-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
19035 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19036 // CHECK17:       omp.inner.for.cond:
19037 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
19038 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
19039 // CHECK17-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
19040 // CHECK17-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19041 // CHECK17:       omp.inner.for.body:
19042 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
19043 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
19044 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19045 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24
19046 // CHECK17-NEXT:    [[TMP16:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !24
19047 // CHECK17-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
19048 // CHECK17-NEXT:    store i32 [[ADD6]], i32* [[CONV]], align 4, !llvm.access.group !24
19049 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
19050 // CHECK17-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !24
19051 // CHECK17-NEXT:    [[CONV7:%.*]] = fpext float [[TMP17]] to double
19052 // CHECK17-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
19053 // CHECK17-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
19054 // CHECK17-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !24
19055 // CHECK17-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
19056 // CHECK17-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
19057 // CHECK17-NEXT:    [[CONV11:%.*]] = fpext float [[TMP18]] to double
19058 // CHECK17-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
19059 // CHECK17-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
19060 // CHECK17-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
19061 // CHECK17-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
19062 // CHECK17-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i64 0, i64 2
19063 // CHECK17-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
19064 // CHECK17-NEXT:    [[ADD16:%.*]] = fadd double [[TMP19]], 1.000000e+00
19065 // CHECK17-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
19066 // CHECK17-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP5]]
19067 // CHECK17-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP20]]
19068 // CHECK17-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i64 3
19069 // CHECK17-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
19070 // CHECK17-NEXT:    [[ADD19:%.*]] = fadd double [[TMP21]], 1.000000e+00
19071 // CHECK17-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
19072 // CHECK17-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
19073 // CHECK17-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !24
19074 // CHECK17-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP22]], 1
19075 // CHECK17-NEXT:    store i64 [[ADD20]], i64* [[X]], align 8, !llvm.access.group !24
19076 // CHECK17-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
19077 // CHECK17-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !24
19078 // CHECK17-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP23]] to i32
19079 // CHECK17-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
19080 // CHECK17-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
19081 // CHECK17-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 8, !llvm.access.group !24
19082 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19083 // CHECK17:       omp.body.continue:
19084 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19085 // CHECK17:       omp.inner.for.inc:
19086 // CHECK17-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
19087 // CHECK17-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP24]], 1
19088 // CHECK17-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
19089 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
19090 // CHECK17:       omp.inner.for.end:
19091 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19092 // CHECK17:       omp.loop.exit:
19093 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
19094 // CHECK17-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19095 // CHECK17-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
19096 // CHECK17-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19097 // CHECK17:       .omp.final.then:
19098 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
19099 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19100 // CHECK17:       .omp.final.done:
19101 // CHECK17-NEXT:    ret void
19102 //
19103 //
19104 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
19105 // CHECK17-SAME: (i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
19106 // CHECK17-NEXT:  entry:
19107 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
19108 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
19109 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
19110 // CHECK17-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
19111 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
19112 // CHECK17-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
19113 // CHECK17-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
19114 // CHECK17-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
19115 // CHECK17-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
19116 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
19117 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
19118 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
19119 // CHECK17-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
19120 // CHECK17-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
19121 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
19122 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
19123 // CHECK17-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
19124 // CHECK17-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
19125 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
19126 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
19127 // CHECK17-NEXT:    [[CONV4:%.*]] = bitcast i64* [[A_CASTED]] to i32*
19128 // CHECK17-NEXT:    store i32 [[TMP1]], i32* [[CONV4]], align 4
19129 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
19130 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV1]], align 4
19131 // CHECK17-NEXT:    [[CONV5:%.*]] = bitcast i64* [[N_CASTED]] to i32*
19132 // CHECK17-NEXT:    store i32 [[TMP3]], i32* [[CONV5]], align 4
19133 // CHECK17-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
19134 // CHECK17-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV2]], align 2
19135 // CHECK17-NEXT:    [[CONV6:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
19136 // CHECK17-NEXT:    store i16 [[TMP5]], i16* [[CONV6]], align 2
19137 // CHECK17-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AA_CASTED]], align 8
19138 // CHECK17-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 1
19139 // CHECK17-NEXT:    [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
19140 // CHECK17-NEXT:    store i8 [[TMP7]], i8* [[CONV7]], align 1
19141 // CHECK17-NEXT:    [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
19142 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]])
19143 // CHECK17-NEXT:    ret void
19144 //
19145 //
19146 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..4
19147 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
19148 // CHECK17-NEXT:  entry:
19149 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19150 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19151 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
19152 // CHECK17-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
19153 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
19154 // CHECK17-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
19155 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
19156 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19157 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19158 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
19159 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
19160 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
19161 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
19162 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19163 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19164 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19165 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19166 // CHECK17-NEXT:    [[I8:%.*]] = alloca i32, align 4
19167 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19168 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19169 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
19170 // CHECK17-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
19171 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
19172 // CHECK17-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
19173 // CHECK17-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
19174 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
19175 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
19176 // CHECK17-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
19177 // CHECK17-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
19178 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
19179 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
19180 // CHECK17-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
19181 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
19182 // CHECK17-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_4]], align 4
19183 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
19184 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19185 // CHECK17-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
19186 // CHECK17-NEXT:    [[SUB6:%.*]] = sub i32 [[SUB]], 1
19187 // CHECK17-NEXT:    [[ADD:%.*]] = add i32 [[SUB6]], 1
19188 // CHECK17-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
19189 // CHECK17-NEXT:    [[SUB7:%.*]] = sub i32 [[DIV]], 1
19190 // CHECK17-NEXT:    store i32 [[SUB7]], i32* [[DOTCAPTURE_EXPR_5]], align 4
19191 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19192 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
19193 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19194 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
19195 // CHECK17-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
19196 // CHECK17-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
19197 // CHECK17:       omp.precond.then:
19198 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19199 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
19200 // CHECK17-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
19201 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19202 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19203 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19204 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
19205 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19206 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19207 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
19208 // CHECK17-NEXT:    [[CMP9:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
19209 // CHECK17-NEXT:    br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19210 // CHECK17:       cond.true:
19211 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
19212 // CHECK17-NEXT:    br label [[COND_END:%.*]]
19213 // CHECK17:       cond.false:
19214 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19215 // CHECK17-NEXT:    br label [[COND_END]]
19216 // CHECK17:       cond.end:
19217 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
19218 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19219 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19220 // CHECK17-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
19221 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19222 // CHECK17:       omp.inner.for.cond:
19223 // CHECK17-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
19224 // CHECK17-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
19225 // CHECK17-NEXT:    [[ADD10:%.*]] = add i32 [[TMP17]], 1
19226 // CHECK17-NEXT:    [[CMP11:%.*]] = icmp ult i32 [[TMP16]], [[ADD10]]
19227 // CHECK17-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19228 // CHECK17:       omp.inner.for.body:
19229 // CHECK17-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !27
19230 // CHECK17-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
19231 // CHECK17-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
19232 // CHECK17-NEXT:    [[ADD12:%.*]] = add i32 [[TMP18]], [[MUL]]
19233 // CHECK17-NEXT:    store i32 [[ADD12]], i32* [[I8]], align 4, !llvm.access.group !27
19234 // CHECK17-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !27
19235 // CHECK17-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP20]], 1
19236 // CHECK17-NEXT:    store i32 [[ADD13]], i32* [[CONV]], align 4, !llvm.access.group !27
19237 // CHECK17-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV2]], align 2, !llvm.access.group !27
19238 // CHECK17-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP21]] to i32
19239 // CHECK17-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
19240 // CHECK17-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
19241 // CHECK17-NEXT:    store i16 [[CONV16]], i16* [[CONV2]], align 2, !llvm.access.group !27
19242 // CHECK17-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV3]], align 1, !llvm.access.group !27
19243 // CHECK17-NEXT:    [[CONV17:%.*]] = sext i8 [[TMP22]] to i32
19244 // CHECK17-NEXT:    [[ADD18:%.*]] = add nsw i32 [[CONV17]], 1
19245 // CHECK17-NEXT:    [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
19246 // CHECK17-NEXT:    store i8 [[CONV19]], i8* [[CONV3]], align 1, !llvm.access.group !27
19247 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
19248 // CHECK17-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
19249 // CHECK17-NEXT:    [[ADD20:%.*]] = add nsw i32 [[TMP23]], 1
19250 // CHECK17-NEXT:    store i32 [[ADD20]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
19251 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19252 // CHECK17:       omp.body.continue:
19253 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19254 // CHECK17:       omp.inner.for.inc:
19255 // CHECK17-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
19256 // CHECK17-NEXT:    [[ADD21:%.*]] = add i32 [[TMP24]], 1
19257 // CHECK17-NEXT:    store i32 [[ADD21]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
19258 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
19259 // CHECK17:       omp.inner.for.end:
19260 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19261 // CHECK17:       omp.loop.exit:
19262 // CHECK17-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19263 // CHECK17-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
19264 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
19265 // CHECK17-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19266 // CHECK17-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
19267 // CHECK17-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19268 // CHECK17:       .omp.final.then:
19269 // CHECK17-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19270 // CHECK17-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
19271 // CHECK17-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
19272 // CHECK17-NEXT:    [[SUB22:%.*]] = sub i32 [[TMP30]], [[TMP31]]
19273 // CHECK17-NEXT:    [[SUB23:%.*]] = sub i32 [[SUB22]], 1
19274 // CHECK17-NEXT:    [[ADD24:%.*]] = add i32 [[SUB23]], 1
19275 // CHECK17-NEXT:    [[DIV25:%.*]] = udiv i32 [[ADD24]], 1
19276 // CHECK17-NEXT:    [[MUL26:%.*]] = mul i32 [[DIV25]], 1
19277 // CHECK17-NEXT:    [[ADD27:%.*]] = add i32 [[TMP29]], [[MUL26]]
19278 // CHECK17-NEXT:    store i32 [[ADD27]], i32* [[I8]], align 4
19279 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19280 // CHECK17:       .omp.final.done:
19281 // CHECK17-NEXT:    br label [[OMP_PRECOND_END]]
19282 // CHECK17:       omp.precond.end:
19283 // CHECK17-NEXT:    ret void
19284 //
19285 //
19286 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
19287 // CHECK17-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
19288 // CHECK17-NEXT:  entry:
19289 // CHECK17-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
19290 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
19291 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
19292 // CHECK17-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
19293 // CHECK17-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
19294 // CHECK17-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
19295 // CHECK17-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
19296 // CHECK17-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
19297 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
19298 // CHECK17-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
19299 // CHECK17-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
19300 // CHECK17-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
19301 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
19302 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
19303 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
19304 // CHECK17-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
19305 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
19306 // CHECK17-NEXT:    [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
19307 // CHECK17-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
19308 // CHECK17-NEXT:    [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
19309 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
19310 // CHECK17-NEXT:    ret void
19311 //
19312 //
19313 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..5
19314 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] {
19315 // CHECK17-NEXT:  entry:
19316 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19317 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19318 // CHECK17-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
19319 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
19320 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
19321 // CHECK17-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
19322 // CHECK17-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
19323 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19324 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19325 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19326 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19327 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19328 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19329 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
19330 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19331 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19332 // CHECK17-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
19333 // CHECK17-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
19334 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
19335 // CHECK17-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
19336 // CHECK17-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
19337 // CHECK17-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
19338 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
19339 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
19340 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
19341 // CHECK17-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
19342 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19343 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
19344 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19345 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19346 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19347 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
19348 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19349 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19350 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
19351 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19352 // CHECK17:       cond.true:
19353 // CHECK17-NEXT:    br label [[COND_END:%.*]]
19354 // CHECK17:       cond.false:
19355 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19356 // CHECK17-NEXT:    br label [[COND_END]]
19357 // CHECK17:       cond.end:
19358 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
19359 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19360 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19361 // CHECK17-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
19362 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19363 // CHECK17:       omp.inner.for.cond:
19364 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
19365 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !30
19366 // CHECK17-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
19367 // CHECK17-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19368 // CHECK17:       omp.inner.for.body:
19369 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
19370 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
19371 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19372 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !30
19373 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !30
19374 // CHECK17-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double
19375 // CHECK17-NEXT:    [[ADD5:%.*]] = fadd double [[CONV4]], 1.500000e+00
19376 // CHECK17-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
19377 // CHECK17-NEXT:    store double [[ADD5]], double* [[A]], align 8, !llvm.access.group !30
19378 // CHECK17-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
19379 // CHECK17-NEXT:    [[TMP13:%.*]] = load double, double* [[A6]], align 8, !llvm.access.group !30
19380 // CHECK17-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
19381 // CHECK17-NEXT:    store double [[INC]], double* [[A6]], align 8, !llvm.access.group !30
19382 // CHECK17-NEXT:    [[CONV7:%.*]] = fptosi double [[INC]] to i16
19383 // CHECK17-NEXT:    [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]]
19384 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]]
19385 // CHECK17-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
19386 // CHECK17-NEXT:    store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !30
19387 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19388 // CHECK17:       omp.body.continue:
19389 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19390 // CHECK17:       omp.inner.for.inc:
19391 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
19392 // CHECK17-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP15]], 1
19393 // CHECK17-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
19394 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
19395 // CHECK17:       omp.inner.for.end:
19396 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19397 // CHECK17:       omp.loop.exit:
19398 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
19399 // CHECK17-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19400 // CHECK17-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
19401 // CHECK17-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19402 // CHECK17:       .omp.final.then:
19403 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
19404 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19405 // CHECK17:       .omp.final.done:
19406 // CHECK17-NEXT:    ret void
19407 //
19408 //
19409 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
19410 // CHECK17-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
19411 // CHECK17-NEXT:  entry:
19412 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
19413 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
19414 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
19415 // CHECK17-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
19416 // CHECK17-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
19417 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
19418 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
19419 // CHECK17-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
19420 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
19421 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
19422 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
19423 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
19424 // CHECK17-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
19425 // CHECK17-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
19426 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
19427 // CHECK17-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
19428 // CHECK17-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
19429 // CHECK17-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
19430 // CHECK17-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
19431 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
19432 // CHECK17-NEXT:    ret void
19433 //
19434 //
19435 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..6
19436 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
19437 // CHECK17-NEXT:  entry:
19438 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19439 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19440 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
19441 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
19442 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
19443 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19444 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19445 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19446 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19447 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19448 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19449 // CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
19450 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19451 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19452 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
19453 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
19454 // CHECK17-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
19455 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
19456 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
19457 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
19458 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19459 // CHECK17-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
19460 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19461 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19462 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19463 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
19464 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19465 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19466 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
19467 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19468 // CHECK17:       cond.true:
19469 // CHECK17-NEXT:    br label [[COND_END:%.*]]
19470 // CHECK17:       cond.false:
19471 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19472 // CHECK17-NEXT:    br label [[COND_END]]
19473 // CHECK17:       cond.end:
19474 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
19475 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19476 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19477 // CHECK17-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
19478 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19479 // CHECK17:       omp.inner.for.cond:
19480 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
19481 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
19482 // CHECK17-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
19483 // CHECK17-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19484 // CHECK17:       omp.inner.for.body:
19485 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
19486 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
19487 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19488 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !33
19489 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !33
19490 // CHECK17-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
19491 // CHECK17-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !33
19492 // CHECK17-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !33
19493 // CHECK17-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
19494 // CHECK17-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
19495 // CHECK17-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
19496 // CHECK17-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !33
19497 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
19498 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !33
19499 // CHECK17-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
19500 // CHECK17-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !33
19501 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19502 // CHECK17:       omp.body.continue:
19503 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19504 // CHECK17:       omp.inner.for.inc:
19505 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
19506 // CHECK17-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP12]], 1
19507 // CHECK17-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
19508 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
19509 // CHECK17:       omp.inner.for.end:
19510 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19511 // CHECK17:       omp.loop.exit:
19512 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
19513 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19514 // CHECK17-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
19515 // CHECK17-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19516 // CHECK17:       .omp.final.then:
19517 // CHECK17-NEXT:    store i32 10, i32* [[I]], align 4
19518 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19519 // CHECK17:       .omp.final.done:
19520 // CHECK17-NEXT:    ret void
19521 //
19522 //
19523 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
19524 // CHECK18-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] {
19525 // CHECK18-NEXT:  entry:
19526 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
19527 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
19528 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8
19529 // CHECK18-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
19530 // CHECK18-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
19531 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
19532 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
19533 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8
19534 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
19535 // CHECK18-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
19536 // CHECK18-NEXT:    [[CONV4:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32*
19537 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV3]], align 4
19538 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV4]], align 4
19539 // CHECK18-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
19540 // CHECK18-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
19541 // CHECK18-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
19542 // CHECK18-NEXT:    store i16 [[TMP3]], i16* [[CONV5]], align 2
19543 // CHECK18-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
19544 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP4]])
19545 // CHECK18-NEXT:    ret void
19546 //
19547 //
19548 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined.
19549 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
19550 // CHECK18-NEXT:  entry:
19551 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19552 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19553 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
19554 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19555 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19556 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19557 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19558 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19559 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19560 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
19561 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19562 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19563 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
19564 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
19565 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19566 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
19567 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19568 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19569 // CHECK18-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19570 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
19571 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19572 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19573 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
19574 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19575 // CHECK18:       cond.true:
19576 // CHECK18-NEXT:    br label [[COND_END:%.*]]
19577 // CHECK18:       cond.false:
19578 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19579 // CHECK18-NEXT:    br label [[COND_END]]
19580 // CHECK18:       cond.end:
19581 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
19582 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19583 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19584 // CHECK18-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
19585 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19586 // CHECK18:       omp.inner.for.cond:
19587 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
19588 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
19589 // CHECK18-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
19590 // CHECK18-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19591 // CHECK18:       omp.inner.for.body:
19592 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
19593 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
19594 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19595 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
19596 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19597 // CHECK18:       omp.body.continue:
19598 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19599 // CHECK18:       omp.inner.for.inc:
19600 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
19601 // CHECK18-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
19602 // CHECK18-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
19603 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
19604 // CHECK18:       omp.inner.for.end:
19605 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19606 // CHECK18:       omp.loop.exit:
19607 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
19608 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19609 // CHECK18-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
19610 // CHECK18-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19611 // CHECK18:       .omp.final.then:
19612 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
19613 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19614 // CHECK18:       .omp.final.done:
19615 // CHECK18-NEXT:    ret void
19616 //
19617 //
19618 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
19619 // CHECK18-SAME: (i64 noundef [[AA:%.*]]) #[[ATTR0]] {
19620 // CHECK18-NEXT:  entry:
19621 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
19622 // CHECK18-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
19623 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
19624 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
19625 // CHECK18-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
19626 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
19627 // CHECK18-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
19628 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
19629 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]])
19630 // CHECK18-NEXT:    ret void
19631 //
19632 //
19633 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..1
19634 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] {
19635 // CHECK18-NEXT:  entry:
19636 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19637 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19638 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
19639 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19640 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19641 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19642 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19643 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19644 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19645 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
19646 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19647 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19648 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
19649 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
19650 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19651 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
19652 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19653 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19654 // CHECK18-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19655 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
19656 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19657 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19658 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
19659 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19660 // CHECK18:       cond.true:
19661 // CHECK18-NEXT:    br label [[COND_END:%.*]]
19662 // CHECK18:       cond.false:
19663 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19664 // CHECK18-NEXT:    br label [[COND_END]]
19665 // CHECK18:       cond.end:
19666 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
19667 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19668 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19669 // CHECK18-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
19670 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19671 // CHECK18:       omp.inner.for.cond:
19672 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
19673 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
19674 // CHECK18-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
19675 // CHECK18-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19676 // CHECK18:       omp.inner.for.body:
19677 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
19678 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
19679 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19680 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !18
19681 // CHECK18-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !18
19682 // CHECK18-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
19683 // CHECK18-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
19684 // CHECK18-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
19685 // CHECK18-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !18
19686 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19687 // CHECK18:       omp.body.continue:
19688 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19689 // CHECK18:       omp.inner.for.inc:
19690 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
19691 // CHECK18-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
19692 // CHECK18-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
19693 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
19694 // CHECK18:       omp.inner.for.end:
19695 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19696 // CHECK18:       omp.loop.exit:
19697 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
19698 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19699 // CHECK18-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
19700 // CHECK18-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19701 // CHECK18:       .omp.final.then:
19702 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
19703 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19704 // CHECK18:       .omp.final.done:
19705 // CHECK18-NEXT:    ret void
19706 //
19707 //
19708 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
19709 // CHECK18-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR0]] {
19710 // CHECK18-NEXT:  entry:
19711 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
19712 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
19713 // CHECK18-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
19714 // CHECK18-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
19715 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
19716 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
19717 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
19718 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
19719 // CHECK18-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
19720 // CHECK18-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
19721 // CHECK18-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
19722 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
19723 // CHECK18-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
19724 // CHECK18-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
19725 // CHECK18-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
19726 // CHECK18-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
19727 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
19728 // CHECK18-NEXT:    ret void
19729 //
19730 //
19731 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..2
19732 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] {
19733 // CHECK18-NEXT:  entry:
19734 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19735 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19736 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
19737 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
19738 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19739 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19740 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19741 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19742 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19743 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19744 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
19745 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19746 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19747 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
19748 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
19749 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
19750 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
19751 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19752 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
19753 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19754 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19755 // CHECK18-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19756 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
19757 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19758 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19759 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
19760 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19761 // CHECK18:       cond.true:
19762 // CHECK18-NEXT:    br label [[COND_END:%.*]]
19763 // CHECK18:       cond.false:
19764 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19765 // CHECK18-NEXT:    br label [[COND_END]]
19766 // CHECK18:       cond.end:
19767 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
19768 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19769 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19770 // CHECK18-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
19771 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19772 // CHECK18:       omp.inner.for.cond:
19773 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
19774 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
19775 // CHECK18-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
19776 // CHECK18-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19777 // CHECK18:       omp.inner.for.body:
19778 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
19779 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
19780 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19781 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !21
19782 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !21
19783 // CHECK18-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
19784 // CHECK18-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !21
19785 // CHECK18-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !21
19786 // CHECK18-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
19787 // CHECK18-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
19788 // CHECK18-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
19789 // CHECK18-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !21
19790 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19791 // CHECK18:       omp.body.continue:
19792 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19793 // CHECK18:       omp.inner.for.inc:
19794 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
19795 // CHECK18-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
19796 // CHECK18-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
19797 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
19798 // CHECK18:       omp.inner.for.end:
19799 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19800 // CHECK18:       omp.loop.exit:
19801 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
19802 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19803 // CHECK18-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
19804 // CHECK18-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19805 // CHECK18:       .omp.final.then:
19806 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
19807 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19808 // CHECK18:       .omp.final.done:
19809 // CHECK18-NEXT:    ret void
19810 //
19811 //
19812 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
19813 // CHECK18-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR0]] {
19814 // CHECK18-NEXT:  entry:
19815 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
19816 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
19817 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
19818 // CHECK18-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
19819 // CHECK18-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
19820 // CHECK18-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
19821 // CHECK18-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
19822 // CHECK18-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
19823 // CHECK18-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
19824 // CHECK18-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
19825 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
19826 // CHECK18-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
19827 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
19828 // CHECK18-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
19829 // CHECK18-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
19830 // CHECK18-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
19831 // CHECK18-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
19832 // CHECK18-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
19833 // CHECK18-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
19834 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
19835 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
19836 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
19837 // CHECK18-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
19838 // CHECK18-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
19839 // CHECK18-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
19840 // CHECK18-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
19841 // CHECK18-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
19842 // CHECK18-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
19843 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
19844 // CHECK18-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
19845 // CHECK18-NEXT:    store i32 [[TMP8]], i32* [[CONV5]], align 4
19846 // CHECK18-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
19847 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
19848 // CHECK18-NEXT:    ret void
19849 //
19850 //
19851 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..3
19852 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR1]] {
19853 // CHECK18-NEXT:  entry:
19854 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
19855 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
19856 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
19857 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
19858 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
19859 // CHECK18-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
19860 // CHECK18-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
19861 // CHECK18-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
19862 // CHECK18-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
19863 // CHECK18-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
19864 // CHECK18-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
19865 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19866 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19867 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19868 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19869 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19870 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19871 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
19872 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
19873 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
19874 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
19875 // CHECK18-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
19876 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
19877 // CHECK18-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
19878 // CHECK18-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
19879 // CHECK18-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
19880 // CHECK18-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
19881 // CHECK18-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
19882 // CHECK18-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
19883 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
19884 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
19885 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
19886 // CHECK18-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
19887 // CHECK18-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
19888 // CHECK18-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
19889 // CHECK18-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
19890 // CHECK18-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
19891 // CHECK18-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
19892 // CHECK18-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 0
19893 // CHECK18-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
19894 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19895 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
19896 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19897 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19898 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
19899 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
19900 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19901 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19902 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
19903 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19904 // CHECK18:       cond.true:
19905 // CHECK18-NEXT:    br label [[COND_END:%.*]]
19906 // CHECK18:       cond.false:
19907 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19908 // CHECK18-NEXT:    br label [[COND_END]]
19909 // CHECK18:       cond.end:
19910 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
19911 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19912 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19913 // CHECK18-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
19914 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19915 // CHECK18:       omp.inner.for.cond:
19916 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
19917 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
19918 // CHECK18-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
19919 // CHECK18-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19920 // CHECK18:       omp.inner.for.body:
19921 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
19922 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
19923 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
19924 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24
19925 // CHECK18-NEXT:    [[TMP16:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !24
19926 // CHECK18-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
19927 // CHECK18-NEXT:    store i32 [[ADD6]], i32* [[CONV]], align 4, !llvm.access.group !24
19928 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
19929 // CHECK18-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !24
19930 // CHECK18-NEXT:    [[CONV7:%.*]] = fpext float [[TMP17]] to double
19931 // CHECK18-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
19932 // CHECK18-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
19933 // CHECK18-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !24
19934 // CHECK18-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
19935 // CHECK18-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
19936 // CHECK18-NEXT:    [[CONV11:%.*]] = fpext float [[TMP18]] to double
19937 // CHECK18-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
19938 // CHECK18-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
19939 // CHECK18-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
19940 // CHECK18-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
19941 // CHECK18-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i64 0, i64 2
19942 // CHECK18-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
19943 // CHECK18-NEXT:    [[ADD16:%.*]] = fadd double [[TMP19]], 1.000000e+00
19944 // CHECK18-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
19945 // CHECK18-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP5]]
19946 // CHECK18-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP20]]
19947 // CHECK18-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i64 3
19948 // CHECK18-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
19949 // CHECK18-NEXT:    [[ADD19:%.*]] = fadd double [[TMP21]], 1.000000e+00
19950 // CHECK18-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
19951 // CHECK18-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
19952 // CHECK18-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !24
19953 // CHECK18-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP22]], 1
19954 // CHECK18-NEXT:    store i64 [[ADD20]], i64* [[X]], align 8, !llvm.access.group !24
19955 // CHECK18-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
19956 // CHECK18-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !24
19957 // CHECK18-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP23]] to i32
19958 // CHECK18-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
19959 // CHECK18-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
19960 // CHECK18-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 8, !llvm.access.group !24
19961 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19962 // CHECK18:       omp.body.continue:
19963 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19964 // CHECK18:       omp.inner.for.inc:
19965 // CHECK18-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
19966 // CHECK18-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP24]], 1
19967 // CHECK18-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
19968 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
19969 // CHECK18:       omp.inner.for.end:
19970 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19971 // CHECK18:       omp.loop.exit:
19972 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
19973 // CHECK18-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19974 // CHECK18-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
19975 // CHECK18-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19976 // CHECK18:       .omp.final.then:
19977 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
19978 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19979 // CHECK18:       .omp.final.done:
19980 // CHECK18-NEXT:    ret void
19981 //
19982 //
19983 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
19984 // CHECK18-SAME: (i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
19985 // CHECK18-NEXT:  entry:
19986 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
19987 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
19988 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
19989 // CHECK18-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
19990 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
19991 // CHECK18-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
19992 // CHECK18-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
19993 // CHECK18-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
19994 // CHECK18-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
19995 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
19996 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
19997 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
19998 // CHECK18-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
19999 // CHECK18-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
20000 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
20001 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20002 // CHECK18-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
20003 // CHECK18-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
20004 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
20005 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
20006 // CHECK18-NEXT:    [[CONV4:%.*]] = bitcast i64* [[A_CASTED]] to i32*
20007 // CHECK18-NEXT:    store i32 [[TMP1]], i32* [[CONV4]], align 4
20008 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
20009 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV1]], align 4
20010 // CHECK18-NEXT:    [[CONV5:%.*]] = bitcast i64* [[N_CASTED]] to i32*
20011 // CHECK18-NEXT:    store i32 [[TMP3]], i32* [[CONV5]], align 4
20012 // CHECK18-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
20013 // CHECK18-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV2]], align 2
20014 // CHECK18-NEXT:    [[CONV6:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
20015 // CHECK18-NEXT:    store i16 [[TMP5]], i16* [[CONV6]], align 2
20016 // CHECK18-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AA_CASTED]], align 8
20017 // CHECK18-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 1
20018 // CHECK18-NEXT:    [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
20019 // CHECK18-NEXT:    store i8 [[TMP7]], i8* [[CONV7]], align 1
20020 // CHECK18-NEXT:    [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
20021 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]])
20022 // CHECK18-NEXT:    ret void
20023 //
20024 //
20025 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..4
20026 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
20027 // CHECK18-NEXT:  entry:
20028 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20029 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20030 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
20031 // CHECK18-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
20032 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
20033 // CHECK18-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
20034 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
20035 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20036 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20037 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20038 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
20039 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
20040 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
20041 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20042 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20043 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20044 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20045 // CHECK18-NEXT:    [[I8:%.*]] = alloca i32, align 4
20046 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20047 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20048 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
20049 // CHECK18-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
20050 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
20051 // CHECK18-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
20052 // CHECK18-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
20053 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
20054 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
20055 // CHECK18-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
20056 // CHECK18-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
20057 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
20058 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
20059 // CHECK18-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
20060 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
20061 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_4]], align 4
20062 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
20063 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20064 // CHECK18-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
20065 // CHECK18-NEXT:    [[SUB6:%.*]] = sub i32 [[SUB]], 1
20066 // CHECK18-NEXT:    [[ADD:%.*]] = add i32 [[SUB6]], 1
20067 // CHECK18-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
20068 // CHECK18-NEXT:    [[SUB7:%.*]] = sub i32 [[DIV]], 1
20069 // CHECK18-NEXT:    store i32 [[SUB7]], i32* [[DOTCAPTURE_EXPR_5]], align 4
20070 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20071 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
20072 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20073 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
20074 // CHECK18-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
20075 // CHECK18-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20076 // CHECK18:       omp.precond.then:
20077 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20078 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
20079 // CHECK18-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
20080 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20081 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20082 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20083 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
20084 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20085 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20086 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
20087 // CHECK18-NEXT:    [[CMP9:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
20088 // CHECK18-NEXT:    br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20089 // CHECK18:       cond.true:
20090 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
20091 // CHECK18-NEXT:    br label [[COND_END:%.*]]
20092 // CHECK18:       cond.false:
20093 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20094 // CHECK18-NEXT:    br label [[COND_END]]
20095 // CHECK18:       cond.end:
20096 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
20097 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
20098 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20099 // CHECK18-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
20100 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20101 // CHECK18:       omp.inner.for.cond:
20102 // CHECK18-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
20103 // CHECK18-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
20104 // CHECK18-NEXT:    [[ADD10:%.*]] = add i32 [[TMP17]], 1
20105 // CHECK18-NEXT:    [[CMP11:%.*]] = icmp ult i32 [[TMP16]], [[ADD10]]
20106 // CHECK18-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20107 // CHECK18:       omp.inner.for.body:
20108 // CHECK18-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !27
20109 // CHECK18-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
20110 // CHECK18-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
20111 // CHECK18-NEXT:    [[ADD12:%.*]] = add i32 [[TMP18]], [[MUL]]
20112 // CHECK18-NEXT:    store i32 [[ADD12]], i32* [[I8]], align 4, !llvm.access.group !27
20113 // CHECK18-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !27
20114 // CHECK18-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP20]], 1
20115 // CHECK18-NEXT:    store i32 [[ADD13]], i32* [[CONV]], align 4, !llvm.access.group !27
20116 // CHECK18-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV2]], align 2, !llvm.access.group !27
20117 // CHECK18-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP21]] to i32
20118 // CHECK18-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
20119 // CHECK18-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
20120 // CHECK18-NEXT:    store i16 [[CONV16]], i16* [[CONV2]], align 2, !llvm.access.group !27
20121 // CHECK18-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV3]], align 1, !llvm.access.group !27
20122 // CHECK18-NEXT:    [[CONV17:%.*]] = sext i8 [[TMP22]] to i32
20123 // CHECK18-NEXT:    [[ADD18:%.*]] = add nsw i32 [[CONV17]], 1
20124 // CHECK18-NEXT:    [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
20125 // CHECK18-NEXT:    store i8 [[CONV19]], i8* [[CONV3]], align 1, !llvm.access.group !27
20126 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
20127 // CHECK18-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
20128 // CHECK18-NEXT:    [[ADD20:%.*]] = add nsw i32 [[TMP23]], 1
20129 // CHECK18-NEXT:    store i32 [[ADD20]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
20130 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20131 // CHECK18:       omp.body.continue:
20132 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20133 // CHECK18:       omp.inner.for.inc:
20134 // CHECK18-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
20135 // CHECK18-NEXT:    [[ADD21:%.*]] = add i32 [[TMP24]], 1
20136 // CHECK18-NEXT:    store i32 [[ADD21]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
20137 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
20138 // CHECK18:       omp.inner.for.end:
20139 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20140 // CHECK18:       omp.loop.exit:
20141 // CHECK18-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20142 // CHECK18-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
20143 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
20144 // CHECK18-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20145 // CHECK18-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
20146 // CHECK18-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20147 // CHECK18:       .omp.final.then:
20148 // CHECK18-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20149 // CHECK18-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
20150 // CHECK18-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20151 // CHECK18-NEXT:    [[SUB22:%.*]] = sub i32 [[TMP30]], [[TMP31]]
20152 // CHECK18-NEXT:    [[SUB23:%.*]] = sub i32 [[SUB22]], 1
20153 // CHECK18-NEXT:    [[ADD24:%.*]] = add i32 [[SUB23]], 1
20154 // CHECK18-NEXT:    [[DIV25:%.*]] = udiv i32 [[ADD24]], 1
20155 // CHECK18-NEXT:    [[MUL26:%.*]] = mul i32 [[DIV25]], 1
20156 // CHECK18-NEXT:    [[ADD27:%.*]] = add i32 [[TMP29]], [[MUL26]]
20157 // CHECK18-NEXT:    store i32 [[ADD27]], i32* [[I8]], align 4
20158 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20159 // CHECK18:       .omp.final.done:
20160 // CHECK18-NEXT:    br label [[OMP_PRECOND_END]]
20161 // CHECK18:       omp.precond.end:
20162 // CHECK18-NEXT:    ret void
20163 //
20164 //
20165 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
20166 // CHECK18-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
20167 // CHECK18-NEXT:  entry:
20168 // CHECK18-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
20169 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
20170 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20171 // CHECK18-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
20172 // CHECK18-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
20173 // CHECK18-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
20174 // CHECK18-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
20175 // CHECK18-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
20176 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20177 // CHECK18-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
20178 // CHECK18-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
20179 // CHECK18-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
20180 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
20181 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20182 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
20183 // CHECK18-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
20184 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
20185 // CHECK18-NEXT:    [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
20186 // CHECK18-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
20187 // CHECK18-NEXT:    [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
20188 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
20189 // CHECK18-NEXT:    ret void
20190 //
20191 //
20192 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..5
20193 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] {
20194 // CHECK18-NEXT:  entry:
20195 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20196 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20197 // CHECK18-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
20198 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
20199 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20200 // CHECK18-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
20201 // CHECK18-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
20202 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20203 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20204 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20205 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20206 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20207 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20208 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
20209 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20210 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20211 // CHECK18-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
20212 // CHECK18-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
20213 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20214 // CHECK18-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
20215 // CHECK18-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
20216 // CHECK18-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
20217 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
20218 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20219 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
20220 // CHECK18-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
20221 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20222 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
20223 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20224 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20225 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20226 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
20227 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20228 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20229 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
20230 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20231 // CHECK18:       cond.true:
20232 // CHECK18-NEXT:    br label [[COND_END:%.*]]
20233 // CHECK18:       cond.false:
20234 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20235 // CHECK18-NEXT:    br label [[COND_END]]
20236 // CHECK18:       cond.end:
20237 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
20238 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
20239 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20240 // CHECK18-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
20241 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20242 // CHECK18:       omp.inner.for.cond:
20243 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
20244 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !30
20245 // CHECK18-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
20246 // CHECK18-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20247 // CHECK18:       omp.inner.for.body:
20248 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
20249 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
20250 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20251 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !30
20252 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !30
20253 // CHECK18-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double
20254 // CHECK18-NEXT:    [[ADD5:%.*]] = fadd double [[CONV4]], 1.500000e+00
20255 // CHECK18-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
20256 // CHECK18-NEXT:    store double [[ADD5]], double* [[A]], align 8, !llvm.access.group !30
20257 // CHECK18-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
20258 // CHECK18-NEXT:    [[TMP13:%.*]] = load double, double* [[A6]], align 8, !llvm.access.group !30
20259 // CHECK18-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
20260 // CHECK18-NEXT:    store double [[INC]], double* [[A6]], align 8, !llvm.access.group !30
20261 // CHECK18-NEXT:    [[CONV7:%.*]] = fptosi double [[INC]] to i16
20262 // CHECK18-NEXT:    [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]]
20263 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]]
20264 // CHECK18-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
20265 // CHECK18-NEXT:    store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !30
20266 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20267 // CHECK18:       omp.body.continue:
20268 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20269 // CHECK18:       omp.inner.for.inc:
20270 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
20271 // CHECK18-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP15]], 1
20272 // CHECK18-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
20273 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
20274 // CHECK18:       omp.inner.for.end:
20275 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20276 // CHECK18:       omp.loop.exit:
20277 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
20278 // CHECK18-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20279 // CHECK18-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
20280 // CHECK18-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20281 // CHECK18:       .omp.final.then:
20282 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
20283 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20284 // CHECK18:       .omp.final.done:
20285 // CHECK18-NEXT:    ret void
20286 //
20287 //
20288 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
20289 // CHECK18-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
20290 // CHECK18-NEXT:  entry:
20291 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
20292 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
20293 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
20294 // CHECK18-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
20295 // CHECK18-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
20296 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
20297 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
20298 // CHECK18-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
20299 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
20300 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
20301 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
20302 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
20303 // CHECK18-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
20304 // CHECK18-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
20305 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
20306 // CHECK18-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
20307 // CHECK18-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
20308 // CHECK18-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
20309 // CHECK18-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
20310 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
20311 // CHECK18-NEXT:    ret void
20312 //
20313 //
20314 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..6
20315 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
20316 // CHECK18-NEXT:  entry:
20317 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20318 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20319 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
20320 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
20321 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
20322 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20323 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20324 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20325 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20326 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20327 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20328 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
20329 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20330 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20331 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
20332 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
20333 // CHECK18-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
20334 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
20335 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
20336 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
20337 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20338 // CHECK18-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
20339 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20340 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20341 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
20342 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
20343 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20344 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20345 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
20346 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20347 // CHECK18:       cond.true:
20348 // CHECK18-NEXT:    br label [[COND_END:%.*]]
20349 // CHECK18:       cond.false:
20350 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20351 // CHECK18-NEXT:    br label [[COND_END]]
20352 // CHECK18:       cond.end:
20353 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
20354 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
20355 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20356 // CHECK18-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
20357 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20358 // CHECK18:       omp.inner.for.cond:
20359 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
20360 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
20361 // CHECK18-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
20362 // CHECK18-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20363 // CHECK18:       omp.inner.for.body:
20364 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
20365 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
20366 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20367 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !33
20368 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !33
20369 // CHECK18-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
20370 // CHECK18-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !33
20371 // CHECK18-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !33
20372 // CHECK18-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
20373 // CHECK18-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
20374 // CHECK18-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
20375 // CHECK18-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !33
20376 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
20377 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !33
20378 // CHECK18-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
20379 // CHECK18-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !33
20380 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20381 // CHECK18:       omp.body.continue:
20382 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20383 // CHECK18:       omp.inner.for.inc:
20384 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
20385 // CHECK18-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP12]], 1
20386 // CHECK18-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
20387 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
20388 // CHECK18:       omp.inner.for.end:
20389 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20390 // CHECK18:       omp.loop.exit:
20391 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
20392 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20393 // CHECK18-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
20394 // CHECK18-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20395 // CHECK18:       .omp.final.then:
20396 // CHECK18-NEXT:    store i32 10, i32* [[I]], align 4
20397 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20398 // CHECK18:       .omp.final.done:
20399 // CHECK18-NEXT:    ret void
20400 //
20401 //
20402 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
20403 // CHECK19-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], i32 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] {
20404 // CHECK19-NEXT:  entry:
20405 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
20406 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
20407 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4
20408 // CHECK19-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
20409 // CHECK19-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
20410 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
20411 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
20412 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
20413 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
20414 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
20415 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
20416 // CHECK19-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
20417 // CHECK19-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
20418 // CHECK19-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
20419 // CHECK19-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
20420 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
20421 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP4]])
20422 // CHECK19-NEXT:    ret void
20423 //
20424 //
20425 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined.
20426 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
20427 // CHECK19-NEXT:  entry:
20428 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
20429 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
20430 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
20431 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20432 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20433 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20434 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20435 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20436 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20437 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
20438 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
20439 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
20440 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
20441 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
20442 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20443 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
20444 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20445 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20446 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20447 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
20448 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20449 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20450 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
20451 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20452 // CHECK19:       cond.true:
20453 // CHECK19-NEXT:    br label [[COND_END:%.*]]
20454 // CHECK19:       cond.false:
20455 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20456 // CHECK19-NEXT:    br label [[COND_END]]
20457 // CHECK19:       cond.end:
20458 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
20459 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
20460 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20461 // CHECK19-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
20462 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20463 // CHECK19:       omp.inner.for.cond:
20464 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
20465 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
20466 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
20467 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20468 // CHECK19:       omp.inner.for.body:
20469 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
20470 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
20471 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20472 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
20473 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20474 // CHECK19:       omp.body.continue:
20475 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20476 // CHECK19:       omp.inner.for.inc:
20477 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
20478 // CHECK19-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
20479 // CHECK19-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
20480 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
20481 // CHECK19:       omp.inner.for.end:
20482 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20483 // CHECK19:       omp.loop.exit:
20484 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
20485 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20486 // CHECK19-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
20487 // CHECK19-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20488 // CHECK19:       .omp.final.then:
20489 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
20490 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20491 // CHECK19:       .omp.final.done:
20492 // CHECK19-NEXT:    ret void
20493 //
20494 //
20495 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
20496 // CHECK19-SAME: (i32 noundef [[AA:%.*]]) #[[ATTR0]] {
20497 // CHECK19-NEXT:  entry:
20498 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
20499 // CHECK19-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
20500 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
20501 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
20502 // CHECK19-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
20503 // CHECK19-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
20504 // CHECK19-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
20505 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
20506 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]])
20507 // CHECK19-NEXT:    ret void
20508 //
20509 //
20510 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..1
20511 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] {
20512 // CHECK19-NEXT:  entry:
20513 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
20514 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
20515 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
20516 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20517 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20518 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20519 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20520 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20521 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20522 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
20523 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
20524 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
20525 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
20526 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
20527 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20528 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
20529 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20530 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20531 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20532 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
20533 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20534 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20535 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
20536 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20537 // CHECK19:       cond.true:
20538 // CHECK19-NEXT:    br label [[COND_END:%.*]]
20539 // CHECK19:       cond.false:
20540 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20541 // CHECK19-NEXT:    br label [[COND_END]]
20542 // CHECK19:       cond.end:
20543 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
20544 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
20545 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20546 // CHECK19-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
20547 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20548 // CHECK19:       omp.inner.for.cond:
20549 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
20550 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
20551 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
20552 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20553 // CHECK19:       omp.inner.for.body:
20554 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
20555 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
20556 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20557 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !19
20558 // CHECK19-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !19
20559 // CHECK19-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
20560 // CHECK19-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
20561 // CHECK19-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
20562 // CHECK19-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !19
20563 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20564 // CHECK19:       omp.body.continue:
20565 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20566 // CHECK19:       omp.inner.for.inc:
20567 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
20568 // CHECK19-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
20569 // CHECK19-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
20570 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
20571 // CHECK19:       omp.inner.for.end:
20572 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20573 // CHECK19:       omp.loop.exit:
20574 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
20575 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20576 // CHECK19-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
20577 // CHECK19-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20578 // CHECK19:       .omp.final.then:
20579 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
20580 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20581 // CHECK19:       .omp.final.done:
20582 // CHECK19-NEXT:    ret void
20583 //
20584 //
20585 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
20586 // CHECK19-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR0]] {
20587 // CHECK19-NEXT:  entry:
20588 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
20589 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
20590 // CHECK19-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
20591 // CHECK19-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
20592 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
20593 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
20594 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
20595 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
20596 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
20597 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
20598 // CHECK19-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
20599 // CHECK19-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
20600 // CHECK19-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
20601 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
20602 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
20603 // CHECK19-NEXT:    ret void
20604 //
20605 //
20606 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..2
20607 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] {
20608 // CHECK19-NEXT:  entry:
20609 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
20610 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
20611 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
20612 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
20613 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20614 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20615 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20616 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20617 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20618 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20619 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
20620 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
20621 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
20622 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
20623 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
20624 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
20625 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20626 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
20627 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20628 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20629 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20630 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
20631 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20632 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20633 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
20634 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20635 // CHECK19:       cond.true:
20636 // CHECK19-NEXT:    br label [[COND_END:%.*]]
20637 // CHECK19:       cond.false:
20638 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20639 // CHECK19-NEXT:    br label [[COND_END]]
20640 // CHECK19:       cond.end:
20641 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
20642 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
20643 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20644 // CHECK19-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
20645 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20646 // CHECK19:       omp.inner.for.cond:
20647 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
20648 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
20649 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
20650 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20651 // CHECK19:       omp.inner.for.body:
20652 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
20653 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
20654 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20655 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !22
20656 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !22
20657 // CHECK19-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
20658 // CHECK19-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !22
20659 // CHECK19-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !22
20660 // CHECK19-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP9]] to i32
20661 // CHECK19-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
20662 // CHECK19-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
20663 // CHECK19-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !22
20664 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20665 // CHECK19:       omp.body.continue:
20666 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20667 // CHECK19:       omp.inner.for.inc:
20668 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
20669 // CHECK19-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1
20670 // CHECK19-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
20671 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
20672 // CHECK19:       omp.inner.for.end:
20673 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20674 // CHECK19:       omp.loop.exit:
20675 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
20676 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20677 // CHECK19-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
20678 // CHECK19-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20679 // CHECK19:       .omp.final.then:
20680 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
20681 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20682 // CHECK19:       .omp.final.done:
20683 // CHECK19-NEXT:    ret void
20684 //
20685 //
20686 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
20687 // CHECK19-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR0]] {
20688 // CHECK19-NEXT:  entry:
20689 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
20690 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
20691 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
20692 // CHECK19-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
20693 // CHECK19-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
20694 // CHECK19-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
20695 // CHECK19-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
20696 // CHECK19-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
20697 // CHECK19-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
20698 // CHECK19-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
20699 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
20700 // CHECK19-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
20701 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
20702 // CHECK19-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
20703 // CHECK19-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
20704 // CHECK19-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
20705 // CHECK19-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
20706 // CHECK19-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
20707 // CHECK19-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
20708 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
20709 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
20710 // CHECK19-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
20711 // CHECK19-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
20712 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
20713 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
20714 // CHECK19-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
20715 // CHECK19-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
20716 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
20717 // CHECK19-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
20718 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
20719 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
20720 // CHECK19-NEXT:    ret void
20721 //
20722 //
20723 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..3
20724 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR1]] {
20725 // CHECK19-NEXT:  entry:
20726 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
20727 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
20728 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
20729 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
20730 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
20731 // CHECK19-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
20732 // CHECK19-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
20733 // CHECK19-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
20734 // CHECK19-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
20735 // CHECK19-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
20736 // CHECK19-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
20737 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20738 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20739 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20740 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20741 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20742 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20743 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
20744 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
20745 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
20746 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
20747 // CHECK19-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
20748 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
20749 // CHECK19-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
20750 // CHECK19-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
20751 // CHECK19-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
20752 // CHECK19-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
20753 // CHECK19-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
20754 // CHECK19-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
20755 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
20756 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
20757 // CHECK19-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
20758 // CHECK19-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
20759 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
20760 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
20761 // CHECK19-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
20762 // CHECK19-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
20763 // CHECK19-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 0
20764 // CHECK19-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
20765 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20766 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
20767 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20768 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20769 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20770 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
20771 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20772 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20773 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
20774 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20775 // CHECK19:       cond.true:
20776 // CHECK19-NEXT:    br label [[COND_END:%.*]]
20777 // CHECK19:       cond.false:
20778 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20779 // CHECK19-NEXT:    br label [[COND_END]]
20780 // CHECK19:       cond.end:
20781 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
20782 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
20783 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20784 // CHECK19-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
20785 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20786 // CHECK19:       omp.inner.for.cond:
20787 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
20788 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
20789 // CHECK19-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
20790 // CHECK19-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20791 // CHECK19:       omp.inner.for.body:
20792 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
20793 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
20794 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
20795 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !25
20796 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !25
20797 // CHECK19-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
20798 // CHECK19-NEXT:    store i32 [[ADD6]], i32* [[A_ADDR]], align 4, !llvm.access.group !25
20799 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
20800 // CHECK19-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !25
20801 // CHECK19-NEXT:    [[CONV:%.*]] = fpext float [[TMP17]] to double
20802 // CHECK19-NEXT:    [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00
20803 // CHECK19-NEXT:    [[CONV8:%.*]] = fptrunc double [[ADD7]] to float
20804 // CHECK19-NEXT:    store float [[CONV8]], float* [[ARRAYIDX]], align 4, !llvm.access.group !25
20805 // CHECK19-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
20806 // CHECK19-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX9]], align 4, !llvm.access.group !25
20807 // CHECK19-NEXT:    [[CONV10:%.*]] = fpext float [[TMP18]] to double
20808 // CHECK19-NEXT:    [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00
20809 // CHECK19-NEXT:    [[CONV12:%.*]] = fptrunc double [[ADD11]] to float
20810 // CHECK19-NEXT:    store float [[CONV12]], float* [[ARRAYIDX9]], align 4, !llvm.access.group !25
20811 // CHECK19-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
20812 // CHECK19-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX13]], i32 0, i32 2
20813 // CHECK19-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX14]], align 8, !llvm.access.group !25
20814 // CHECK19-NEXT:    [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00
20815 // CHECK19-NEXT:    store double [[ADD15]], double* [[ARRAYIDX14]], align 8, !llvm.access.group !25
20816 // CHECK19-NEXT:    [[TMP20:%.*]] = mul nsw i32 1, [[TMP5]]
20817 // CHECK19-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP20]]
20818 // CHECK19-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX16]], i32 3
20819 // CHECK19-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !25
20820 // CHECK19-NEXT:    [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00
20821 // CHECK19-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !25
20822 // CHECK19-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
20823 // CHECK19-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !25
20824 // CHECK19-NEXT:    [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1
20825 // CHECK19-NEXT:    store i64 [[ADD19]], i64* [[X]], align 4, !llvm.access.group !25
20826 // CHECK19-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
20827 // CHECK19-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !25
20828 // CHECK19-NEXT:    [[CONV20:%.*]] = sext i8 [[TMP23]] to i32
20829 // CHECK19-NEXT:    [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1
20830 // CHECK19-NEXT:    [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8
20831 // CHECK19-NEXT:    store i8 [[CONV22]], i8* [[Y]], align 4, !llvm.access.group !25
20832 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20833 // CHECK19:       omp.body.continue:
20834 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20835 // CHECK19:       omp.inner.for.inc:
20836 // CHECK19-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
20837 // CHECK19-NEXT:    [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1
20838 // CHECK19-NEXT:    store i32 [[ADD23]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
20839 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
20840 // CHECK19:       omp.inner.for.end:
20841 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
20842 // CHECK19:       omp.loop.exit:
20843 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
20844 // CHECK19-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20845 // CHECK19-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
20846 // CHECK19-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20847 // CHECK19:       .omp.final.then:
20848 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
20849 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20850 // CHECK19:       .omp.final.done:
20851 // CHECK19-NEXT:    ret void
20852 //
20853 //
20854 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
20855 // CHECK19-SAME: (i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
20856 // CHECK19-NEXT:  entry:
20857 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
20858 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
20859 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
20860 // CHECK19-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
20861 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
20862 // CHECK19-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
20863 // CHECK19-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
20864 // CHECK19-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
20865 // CHECK19-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
20866 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
20867 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
20868 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
20869 // CHECK19-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
20870 // CHECK19-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
20871 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
20872 // CHECK19-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
20873 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
20874 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
20875 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
20876 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
20877 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
20878 // CHECK19-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
20879 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
20880 // CHECK19-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV]], align 2
20881 // CHECK19-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
20882 // CHECK19-NEXT:    store i16 [[TMP5]], i16* [[CONV2]], align 2
20883 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AA_CASTED]], align 4
20884 // CHECK19-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV1]], align 1
20885 // CHECK19-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
20886 // CHECK19-NEXT:    store i8 [[TMP7]], i8* [[CONV3]], align 1
20887 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
20888 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]])
20889 // CHECK19-NEXT:    ret void
20890 //
20891 //
20892 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..4
20893 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
20894 // CHECK19-NEXT:  entry:
20895 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
20896 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
20897 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
20898 // CHECK19-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
20899 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
20900 // CHECK19-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
20901 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
20902 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20903 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20904 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
20905 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
20906 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
20907 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
20908 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
20909 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
20910 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
20911 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20912 // CHECK19-NEXT:    [[I6:%.*]] = alloca i32, align 4
20913 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
20914 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
20915 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
20916 // CHECK19-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
20917 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
20918 // CHECK19-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
20919 // CHECK19-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
20920 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
20921 // CHECK19-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
20922 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
20923 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
20924 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
20925 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
20926 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
20927 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20928 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20929 // CHECK19-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
20930 // CHECK19-NEXT:    [[SUB4:%.*]] = sub i32 [[SUB]], 1
20931 // CHECK19-NEXT:    [[ADD:%.*]] = add i32 [[SUB4]], 1
20932 // CHECK19-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
20933 // CHECK19-NEXT:    [[SUB5:%.*]] = sub i32 [[DIV]], 1
20934 // CHECK19-NEXT:    store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_3]], align 4
20935 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20936 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
20937 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
20938 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
20939 // CHECK19-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
20940 // CHECK19-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
20941 // CHECK19:       omp.precond.then:
20942 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
20943 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20944 // CHECK19-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
20945 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
20946 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20947 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
20948 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
20949 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
20950 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20951 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20952 // CHECK19-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
20953 // CHECK19-NEXT:    br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
20954 // CHECK19:       cond.true:
20955 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
20956 // CHECK19-NEXT:    br label [[COND_END:%.*]]
20957 // CHECK19:       cond.false:
20958 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20959 // CHECK19-NEXT:    br label [[COND_END]]
20960 // CHECK19:       cond.end:
20961 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
20962 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
20963 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20964 // CHECK19-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
20965 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
20966 // CHECK19:       omp.inner.for.cond:
20967 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
20968 // CHECK19-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28
20969 // CHECK19-NEXT:    [[ADD8:%.*]] = add i32 [[TMP17]], 1
20970 // CHECK19-NEXT:    [[CMP9:%.*]] = icmp ult i32 [[TMP16]], [[ADD8]]
20971 // CHECK19-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
20972 // CHECK19:       omp.inner.for.body:
20973 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !28
20974 // CHECK19-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
20975 // CHECK19-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
20976 // CHECK19-NEXT:    [[ADD10:%.*]] = add i32 [[TMP18]], [[MUL]]
20977 // CHECK19-NEXT:    store i32 [[ADD10]], i32* [[I6]], align 4, !llvm.access.group !28
20978 // CHECK19-NEXT:    [[TMP20:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !28
20979 // CHECK19-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP20]], 1
20980 // CHECK19-NEXT:    store i32 [[ADD11]], i32* [[A_ADDR]], align 4, !llvm.access.group !28
20981 // CHECK19-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !28
20982 // CHECK19-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP21]] to i32
20983 // CHECK19-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
20984 // CHECK19-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
20985 // CHECK19-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !28
20986 // CHECK19-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV1]], align 1, !llvm.access.group !28
20987 // CHECK19-NEXT:    [[CONV15:%.*]] = sext i8 [[TMP22]] to i32
20988 // CHECK19-NEXT:    [[ADD16:%.*]] = add nsw i32 [[CONV15]], 1
20989 // CHECK19-NEXT:    [[CONV17:%.*]] = trunc i32 [[ADD16]] to i8
20990 // CHECK19-NEXT:    store i8 [[CONV17]], i8* [[CONV1]], align 1, !llvm.access.group !28
20991 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
20992 // CHECK19-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
20993 // CHECK19-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP23]], 1
20994 // CHECK19-NEXT:    store i32 [[ADD18]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
20995 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
20996 // CHECK19:       omp.body.continue:
20997 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20998 // CHECK19:       omp.inner.for.inc:
20999 // CHECK19-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
21000 // CHECK19-NEXT:    [[ADD19:%.*]] = add i32 [[TMP24]], 1
21001 // CHECK19-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
21002 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
21003 // CHECK19:       omp.inner.for.end:
21004 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21005 // CHECK19:       omp.loop.exit:
21006 // CHECK19-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21007 // CHECK19-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
21008 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
21009 // CHECK19-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21010 // CHECK19-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
21011 // CHECK19-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21012 // CHECK19:       .omp.final.then:
21013 // CHECK19-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21014 // CHECK19-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21015 // CHECK19-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21016 // CHECK19-NEXT:    [[SUB20:%.*]] = sub i32 [[TMP30]], [[TMP31]]
21017 // CHECK19-NEXT:    [[SUB21:%.*]] = sub i32 [[SUB20]], 1
21018 // CHECK19-NEXT:    [[ADD22:%.*]] = add i32 [[SUB21]], 1
21019 // CHECK19-NEXT:    [[DIV23:%.*]] = udiv i32 [[ADD22]], 1
21020 // CHECK19-NEXT:    [[MUL24:%.*]] = mul i32 [[DIV23]], 1
21021 // CHECK19-NEXT:    [[ADD25:%.*]] = add i32 [[TMP29]], [[MUL24]]
21022 // CHECK19-NEXT:    store i32 [[ADD25]], i32* [[I6]], align 4
21023 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21024 // CHECK19:       .omp.final.done:
21025 // CHECK19-NEXT:    br label [[OMP_PRECOND_END]]
21026 // CHECK19:       omp.precond.end:
21027 // CHECK19-NEXT:    ret void
21028 //
21029 //
21030 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
21031 // CHECK19-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
21032 // CHECK19-NEXT:  entry:
21033 // CHECK19-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
21034 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
21035 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
21036 // CHECK19-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
21037 // CHECK19-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
21038 // CHECK19-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
21039 // CHECK19-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
21040 // CHECK19-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
21041 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
21042 // CHECK19-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
21043 // CHECK19-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
21044 // CHECK19-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
21045 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
21046 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
21047 // CHECK19-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
21048 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
21049 // CHECK19-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
21050 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
21051 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
21052 // CHECK19-NEXT:    ret void
21053 //
21054 //
21055 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..5
21056 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] {
21057 // CHECK19-NEXT:  entry:
21058 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21059 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21060 // CHECK19-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
21061 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
21062 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
21063 // CHECK19-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
21064 // CHECK19-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
21065 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21066 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21067 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21068 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21069 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21070 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21071 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
21072 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21073 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21074 // CHECK19-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
21075 // CHECK19-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
21076 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
21077 // CHECK19-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
21078 // CHECK19-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
21079 // CHECK19-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
21080 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
21081 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
21082 // CHECK19-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
21083 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21084 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
21085 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21086 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21087 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21088 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
21089 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21090 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21091 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
21092 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21093 // CHECK19:       cond.true:
21094 // CHECK19-NEXT:    br label [[COND_END:%.*]]
21095 // CHECK19:       cond.false:
21096 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21097 // CHECK19-NEXT:    br label [[COND_END]]
21098 // CHECK19:       cond.end:
21099 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
21100 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21101 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21102 // CHECK19-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
21103 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21104 // CHECK19:       omp.inner.for.cond:
21105 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
21106 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !31
21107 // CHECK19-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
21108 // CHECK19-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21109 // CHECK19:       omp.inner.for.body:
21110 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
21111 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
21112 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21113 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !31
21114 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !31
21115 // CHECK19-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
21116 // CHECK19-NEXT:    [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00
21117 // CHECK19-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
21118 // CHECK19-NEXT:    store double [[ADD4]], double* [[A]], align 4, !llvm.access.group !31
21119 // CHECK19-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
21120 // CHECK19-NEXT:    [[TMP13:%.*]] = load double, double* [[A5]], align 4, !llvm.access.group !31
21121 // CHECK19-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
21122 // CHECK19-NEXT:    store double [[INC]], double* [[A5]], align 4, !llvm.access.group !31
21123 // CHECK19-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
21124 // CHECK19-NEXT:    [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
21125 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
21126 // CHECK19-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
21127 // CHECK19-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !31
21128 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21129 // CHECK19:       omp.body.continue:
21130 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21131 // CHECK19:       omp.inner.for.inc:
21132 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
21133 // CHECK19-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1
21134 // CHECK19-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
21135 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
21136 // CHECK19:       omp.inner.for.end:
21137 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21138 // CHECK19:       omp.loop.exit:
21139 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
21140 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21141 // CHECK19-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
21142 // CHECK19-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21143 // CHECK19:       .omp.final.then:
21144 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
21145 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21146 // CHECK19:       .omp.final.done:
21147 // CHECK19-NEXT:    ret void
21148 //
21149 //
21150 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
21151 // CHECK19-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
21152 // CHECK19-NEXT:  entry:
21153 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
21154 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
21155 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
21156 // CHECK19-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
21157 // CHECK19-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
21158 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
21159 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
21160 // CHECK19-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
21161 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
21162 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
21163 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
21164 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
21165 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
21166 // CHECK19-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
21167 // CHECK19-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
21168 // CHECK19-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
21169 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
21170 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
21171 // CHECK19-NEXT:    ret void
21172 //
21173 //
21174 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..6
21175 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
21176 // CHECK19-NEXT:  entry:
21177 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21178 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21179 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
21180 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
21181 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
21182 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21183 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21184 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21185 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21186 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21187 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21188 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
21189 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21190 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21191 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
21192 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
21193 // CHECK19-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
21194 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
21195 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
21196 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21197 // CHECK19-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
21198 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21199 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21200 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21201 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
21202 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21203 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21204 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
21205 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21206 // CHECK19:       cond.true:
21207 // CHECK19-NEXT:    br label [[COND_END:%.*]]
21208 // CHECK19:       cond.false:
21209 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21210 // CHECK19-NEXT:    br label [[COND_END]]
21211 // CHECK19:       cond.end:
21212 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
21213 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21214 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21215 // CHECK19-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
21216 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21217 // CHECK19:       omp.inner.for.cond:
21218 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
21219 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !34
21220 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
21221 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21222 // CHECK19:       omp.inner.for.body:
21223 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
21224 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
21225 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21226 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !34
21227 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !34
21228 // CHECK19-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
21229 // CHECK19-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !34
21230 // CHECK19-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !34
21231 // CHECK19-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
21232 // CHECK19-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
21233 // CHECK19-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
21234 // CHECK19-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !34
21235 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
21236 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !34
21237 // CHECK19-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
21238 // CHECK19-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !34
21239 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21240 // CHECK19:       omp.body.continue:
21241 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21242 // CHECK19:       omp.inner.for.inc:
21243 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
21244 // CHECK19-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP12]], 1
21245 // CHECK19-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
21246 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
21247 // CHECK19:       omp.inner.for.end:
21248 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21249 // CHECK19:       omp.loop.exit:
21250 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
21251 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21252 // CHECK19-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
21253 // CHECK19-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21254 // CHECK19:       .omp.final.then:
21255 // CHECK19-NEXT:    store i32 10, i32* [[I]], align 4
21256 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21257 // CHECK19:       .omp.final.done:
21258 // CHECK19-NEXT:    ret void
21259 //
21260 //
21261 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
21262 // CHECK20-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], i32 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] {
21263 // CHECK20-NEXT:  entry:
21264 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
21265 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
21266 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4
21267 // CHECK20-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
21268 // CHECK20-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
21269 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
21270 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
21271 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
21272 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
21273 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
21274 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
21275 // CHECK20-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
21276 // CHECK20-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
21277 // CHECK20-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
21278 // CHECK20-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
21279 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
21280 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP4]])
21281 // CHECK20-NEXT:    ret void
21282 //
21283 //
21284 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined.
21285 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
21286 // CHECK20-NEXT:  entry:
21287 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21288 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21289 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
21290 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21291 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21292 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21293 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21294 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21295 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21296 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
21297 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21298 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21299 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
21300 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
21301 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21302 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
21303 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21304 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21305 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21306 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
21307 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21308 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21309 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
21310 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21311 // CHECK20:       cond.true:
21312 // CHECK20-NEXT:    br label [[COND_END:%.*]]
21313 // CHECK20:       cond.false:
21314 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21315 // CHECK20-NEXT:    br label [[COND_END]]
21316 // CHECK20:       cond.end:
21317 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
21318 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21319 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21320 // CHECK20-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
21321 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21322 // CHECK20:       omp.inner.for.cond:
21323 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
21324 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
21325 // CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
21326 // CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21327 // CHECK20:       omp.inner.for.body:
21328 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
21329 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
21330 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21331 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
21332 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21333 // CHECK20:       omp.body.continue:
21334 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21335 // CHECK20:       omp.inner.for.inc:
21336 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
21337 // CHECK20-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
21338 // CHECK20-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
21339 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
21340 // CHECK20:       omp.inner.for.end:
21341 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21342 // CHECK20:       omp.loop.exit:
21343 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
21344 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21345 // CHECK20-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
21346 // CHECK20-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21347 // CHECK20:       .omp.final.then:
21348 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
21349 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21350 // CHECK20:       .omp.final.done:
21351 // CHECK20-NEXT:    ret void
21352 //
21353 //
21354 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
21355 // CHECK20-SAME: (i32 noundef [[AA:%.*]]) #[[ATTR0]] {
21356 // CHECK20-NEXT:  entry:
21357 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
21358 // CHECK20-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
21359 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
21360 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
21361 // CHECK20-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
21362 // CHECK20-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
21363 // CHECK20-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
21364 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
21365 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]])
21366 // CHECK20-NEXT:    ret void
21367 //
21368 //
21369 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..1
21370 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] {
21371 // CHECK20-NEXT:  entry:
21372 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21373 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21374 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
21375 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21376 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21377 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21378 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21379 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21380 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21381 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
21382 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21383 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21384 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
21385 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
21386 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21387 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
21388 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21389 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21390 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21391 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
21392 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21393 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21394 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
21395 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21396 // CHECK20:       cond.true:
21397 // CHECK20-NEXT:    br label [[COND_END:%.*]]
21398 // CHECK20:       cond.false:
21399 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21400 // CHECK20-NEXT:    br label [[COND_END]]
21401 // CHECK20:       cond.end:
21402 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
21403 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21404 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21405 // CHECK20-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
21406 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21407 // CHECK20:       omp.inner.for.cond:
21408 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
21409 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
21410 // CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
21411 // CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21412 // CHECK20:       omp.inner.for.body:
21413 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
21414 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
21415 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21416 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !19
21417 // CHECK20-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !19
21418 // CHECK20-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
21419 // CHECK20-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
21420 // CHECK20-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
21421 // CHECK20-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !19
21422 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21423 // CHECK20:       omp.body.continue:
21424 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21425 // CHECK20:       omp.inner.for.inc:
21426 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
21427 // CHECK20-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
21428 // CHECK20-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
21429 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
21430 // CHECK20:       omp.inner.for.end:
21431 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21432 // CHECK20:       omp.loop.exit:
21433 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
21434 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21435 // CHECK20-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
21436 // CHECK20-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21437 // CHECK20:       .omp.final.then:
21438 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
21439 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21440 // CHECK20:       .omp.final.done:
21441 // CHECK20-NEXT:    ret void
21442 //
21443 //
21444 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
21445 // CHECK20-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR0]] {
21446 // CHECK20-NEXT:  entry:
21447 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
21448 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
21449 // CHECK20-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
21450 // CHECK20-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
21451 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
21452 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
21453 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
21454 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
21455 // CHECK20-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
21456 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
21457 // CHECK20-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
21458 // CHECK20-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
21459 // CHECK20-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
21460 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
21461 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
21462 // CHECK20-NEXT:    ret void
21463 //
21464 //
21465 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..2
21466 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] {
21467 // CHECK20-NEXT:  entry:
21468 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21469 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21470 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
21471 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
21472 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21473 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21474 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21475 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21476 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21477 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21478 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
21479 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21480 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21481 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
21482 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
21483 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
21484 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21485 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
21486 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21487 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21488 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21489 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
21490 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21491 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21492 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
21493 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21494 // CHECK20:       cond.true:
21495 // CHECK20-NEXT:    br label [[COND_END:%.*]]
21496 // CHECK20:       cond.false:
21497 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21498 // CHECK20-NEXT:    br label [[COND_END]]
21499 // CHECK20:       cond.end:
21500 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
21501 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21502 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21503 // CHECK20-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
21504 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21505 // CHECK20:       omp.inner.for.cond:
21506 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
21507 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
21508 // CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
21509 // CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21510 // CHECK20:       omp.inner.for.body:
21511 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
21512 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
21513 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21514 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !22
21515 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !22
21516 // CHECK20-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
21517 // CHECK20-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !22
21518 // CHECK20-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !22
21519 // CHECK20-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP9]] to i32
21520 // CHECK20-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
21521 // CHECK20-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
21522 // CHECK20-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !22
21523 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21524 // CHECK20:       omp.body.continue:
21525 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21526 // CHECK20:       omp.inner.for.inc:
21527 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
21528 // CHECK20-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1
21529 // CHECK20-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
21530 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
21531 // CHECK20:       omp.inner.for.end:
21532 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21533 // CHECK20:       omp.loop.exit:
21534 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
21535 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21536 // CHECK20-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
21537 // CHECK20-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21538 // CHECK20:       .omp.final.then:
21539 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
21540 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21541 // CHECK20:       .omp.final.done:
21542 // CHECK20-NEXT:    ret void
21543 //
21544 //
21545 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
21546 // CHECK20-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR0]] {
21547 // CHECK20-NEXT:  entry:
21548 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
21549 // CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
21550 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
21551 // CHECK20-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
21552 // CHECK20-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
21553 // CHECK20-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
21554 // CHECK20-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
21555 // CHECK20-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
21556 // CHECK20-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
21557 // CHECK20-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
21558 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
21559 // CHECK20-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
21560 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
21561 // CHECK20-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
21562 // CHECK20-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
21563 // CHECK20-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
21564 // CHECK20-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
21565 // CHECK20-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
21566 // CHECK20-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
21567 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
21568 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
21569 // CHECK20-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
21570 // CHECK20-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
21571 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
21572 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
21573 // CHECK20-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
21574 // CHECK20-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
21575 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
21576 // CHECK20-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
21577 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
21578 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
21579 // CHECK20-NEXT:    ret void
21580 //
21581 //
21582 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..3
21583 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR1]] {
21584 // CHECK20-NEXT:  entry:
21585 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21586 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21587 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
21588 // CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
21589 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
21590 // CHECK20-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
21591 // CHECK20-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
21592 // CHECK20-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
21593 // CHECK20-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
21594 // CHECK20-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
21595 // CHECK20-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
21596 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21597 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21598 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21599 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21600 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21601 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21602 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
21603 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21604 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21605 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
21606 // CHECK20-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
21607 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
21608 // CHECK20-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
21609 // CHECK20-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
21610 // CHECK20-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
21611 // CHECK20-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
21612 // CHECK20-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
21613 // CHECK20-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
21614 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
21615 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
21616 // CHECK20-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
21617 // CHECK20-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
21618 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
21619 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
21620 // CHECK20-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
21621 // CHECK20-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
21622 // CHECK20-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 0
21623 // CHECK20-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
21624 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21625 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
21626 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21627 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21628 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21629 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
21630 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21631 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21632 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
21633 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21634 // CHECK20:       cond.true:
21635 // CHECK20-NEXT:    br label [[COND_END:%.*]]
21636 // CHECK20:       cond.false:
21637 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21638 // CHECK20-NEXT:    br label [[COND_END]]
21639 // CHECK20:       cond.end:
21640 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
21641 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21642 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21643 // CHECK20-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
21644 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21645 // CHECK20:       omp.inner.for.cond:
21646 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
21647 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
21648 // CHECK20-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
21649 // CHECK20-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21650 // CHECK20:       omp.inner.for.body:
21651 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
21652 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
21653 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21654 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !25
21655 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !25
21656 // CHECK20-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
21657 // CHECK20-NEXT:    store i32 [[ADD6]], i32* [[A_ADDR]], align 4, !llvm.access.group !25
21658 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
21659 // CHECK20-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !25
21660 // CHECK20-NEXT:    [[CONV:%.*]] = fpext float [[TMP17]] to double
21661 // CHECK20-NEXT:    [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00
21662 // CHECK20-NEXT:    [[CONV8:%.*]] = fptrunc double [[ADD7]] to float
21663 // CHECK20-NEXT:    store float [[CONV8]], float* [[ARRAYIDX]], align 4, !llvm.access.group !25
21664 // CHECK20-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
21665 // CHECK20-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX9]], align 4, !llvm.access.group !25
21666 // CHECK20-NEXT:    [[CONV10:%.*]] = fpext float [[TMP18]] to double
21667 // CHECK20-NEXT:    [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00
21668 // CHECK20-NEXT:    [[CONV12:%.*]] = fptrunc double [[ADD11]] to float
21669 // CHECK20-NEXT:    store float [[CONV12]], float* [[ARRAYIDX9]], align 4, !llvm.access.group !25
21670 // CHECK20-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
21671 // CHECK20-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX13]], i32 0, i32 2
21672 // CHECK20-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX14]], align 8, !llvm.access.group !25
21673 // CHECK20-NEXT:    [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00
21674 // CHECK20-NEXT:    store double [[ADD15]], double* [[ARRAYIDX14]], align 8, !llvm.access.group !25
21675 // CHECK20-NEXT:    [[TMP20:%.*]] = mul nsw i32 1, [[TMP5]]
21676 // CHECK20-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP20]]
21677 // CHECK20-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX16]], i32 3
21678 // CHECK20-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !25
21679 // CHECK20-NEXT:    [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00
21680 // CHECK20-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !25
21681 // CHECK20-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
21682 // CHECK20-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !25
21683 // CHECK20-NEXT:    [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1
21684 // CHECK20-NEXT:    store i64 [[ADD19]], i64* [[X]], align 4, !llvm.access.group !25
21685 // CHECK20-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
21686 // CHECK20-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !25
21687 // CHECK20-NEXT:    [[CONV20:%.*]] = sext i8 [[TMP23]] to i32
21688 // CHECK20-NEXT:    [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1
21689 // CHECK20-NEXT:    [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8
21690 // CHECK20-NEXT:    store i8 [[CONV22]], i8* [[Y]], align 4, !llvm.access.group !25
21691 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21692 // CHECK20:       omp.body.continue:
21693 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21694 // CHECK20:       omp.inner.for.inc:
21695 // CHECK20-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
21696 // CHECK20-NEXT:    [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1
21697 // CHECK20-NEXT:    store i32 [[ADD23]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
21698 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
21699 // CHECK20:       omp.inner.for.end:
21700 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21701 // CHECK20:       omp.loop.exit:
21702 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
21703 // CHECK20-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21704 // CHECK20-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
21705 // CHECK20-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21706 // CHECK20:       .omp.final.then:
21707 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
21708 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21709 // CHECK20:       .omp.final.done:
21710 // CHECK20-NEXT:    ret void
21711 //
21712 //
21713 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
21714 // CHECK20-SAME: (i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
21715 // CHECK20-NEXT:  entry:
21716 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
21717 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21718 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
21719 // CHECK20-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
21720 // CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
21721 // CHECK20-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
21722 // CHECK20-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
21723 // CHECK20-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
21724 // CHECK20-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
21725 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
21726 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
21727 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
21728 // CHECK20-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
21729 // CHECK20-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
21730 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
21731 // CHECK20-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
21732 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
21733 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
21734 // CHECK20-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
21735 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
21736 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
21737 // CHECK20-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
21738 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
21739 // CHECK20-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV]], align 2
21740 // CHECK20-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
21741 // CHECK20-NEXT:    store i16 [[TMP5]], i16* [[CONV2]], align 2
21742 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AA_CASTED]], align 4
21743 // CHECK20-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV1]], align 1
21744 // CHECK20-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
21745 // CHECK20-NEXT:    store i8 [[TMP7]], i8* [[CONV3]], align 1
21746 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
21747 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]])
21748 // CHECK20-NEXT:    ret void
21749 //
21750 //
21751 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..4
21752 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
21753 // CHECK20-NEXT:  entry:
21754 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21755 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21756 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
21757 // CHECK20-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
21758 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
21759 // CHECK20-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
21760 // CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
21761 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21762 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21763 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
21764 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
21765 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
21766 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
21767 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21768 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21769 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21770 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21771 // CHECK20-NEXT:    [[I6:%.*]] = alloca i32, align 4
21772 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21773 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21774 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
21775 // CHECK20-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
21776 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
21777 // CHECK20-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
21778 // CHECK20-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
21779 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
21780 // CHECK20-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
21781 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
21782 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
21783 // CHECK20-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
21784 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
21785 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
21786 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21787 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21788 // CHECK20-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
21789 // CHECK20-NEXT:    [[SUB4:%.*]] = sub i32 [[SUB]], 1
21790 // CHECK20-NEXT:    [[ADD:%.*]] = add i32 [[SUB4]], 1
21791 // CHECK20-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
21792 // CHECK20-NEXT:    [[SUB5:%.*]] = sub i32 [[DIV]], 1
21793 // CHECK20-NEXT:    store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_3]], align 4
21794 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21795 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
21796 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21797 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21798 // CHECK20-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
21799 // CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
21800 // CHECK20:       omp.precond.then:
21801 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21802 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
21803 // CHECK20-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
21804 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21805 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21806 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21807 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
21808 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21809 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21810 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
21811 // CHECK20-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
21812 // CHECK20-NEXT:    br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21813 // CHECK20:       cond.true:
21814 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
21815 // CHECK20-NEXT:    br label [[COND_END:%.*]]
21816 // CHECK20:       cond.false:
21817 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21818 // CHECK20-NEXT:    br label [[COND_END]]
21819 // CHECK20:       cond.end:
21820 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
21821 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21822 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21823 // CHECK20-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
21824 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21825 // CHECK20:       omp.inner.for.cond:
21826 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
21827 // CHECK20-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28
21828 // CHECK20-NEXT:    [[ADD8:%.*]] = add i32 [[TMP17]], 1
21829 // CHECK20-NEXT:    [[CMP9:%.*]] = icmp ult i32 [[TMP16]], [[ADD8]]
21830 // CHECK20-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21831 // CHECK20:       omp.inner.for.body:
21832 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !28
21833 // CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
21834 // CHECK20-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
21835 // CHECK20-NEXT:    [[ADD10:%.*]] = add i32 [[TMP18]], [[MUL]]
21836 // CHECK20-NEXT:    store i32 [[ADD10]], i32* [[I6]], align 4, !llvm.access.group !28
21837 // CHECK20-NEXT:    [[TMP20:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !28
21838 // CHECK20-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP20]], 1
21839 // CHECK20-NEXT:    store i32 [[ADD11]], i32* [[A_ADDR]], align 4, !llvm.access.group !28
21840 // CHECK20-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !28
21841 // CHECK20-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP21]] to i32
21842 // CHECK20-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
21843 // CHECK20-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
21844 // CHECK20-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !28
21845 // CHECK20-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV1]], align 1, !llvm.access.group !28
21846 // CHECK20-NEXT:    [[CONV15:%.*]] = sext i8 [[TMP22]] to i32
21847 // CHECK20-NEXT:    [[ADD16:%.*]] = add nsw i32 [[CONV15]], 1
21848 // CHECK20-NEXT:    [[CONV17:%.*]] = trunc i32 [[ADD16]] to i8
21849 // CHECK20-NEXT:    store i8 [[CONV17]], i8* [[CONV1]], align 1, !llvm.access.group !28
21850 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
21851 // CHECK20-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
21852 // CHECK20-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP23]], 1
21853 // CHECK20-NEXT:    store i32 [[ADD18]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
21854 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21855 // CHECK20:       omp.body.continue:
21856 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21857 // CHECK20:       omp.inner.for.inc:
21858 // CHECK20-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
21859 // CHECK20-NEXT:    [[ADD19:%.*]] = add i32 [[TMP24]], 1
21860 // CHECK20-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
21861 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
21862 // CHECK20:       omp.inner.for.end:
21863 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21864 // CHECK20:       omp.loop.exit:
21865 // CHECK20-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21866 // CHECK20-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
21867 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
21868 // CHECK20-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21869 // CHECK20-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
21870 // CHECK20-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21871 // CHECK20:       .omp.final.then:
21872 // CHECK20-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21873 // CHECK20-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
21874 // CHECK20-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
21875 // CHECK20-NEXT:    [[SUB20:%.*]] = sub i32 [[TMP30]], [[TMP31]]
21876 // CHECK20-NEXT:    [[SUB21:%.*]] = sub i32 [[SUB20]], 1
21877 // CHECK20-NEXT:    [[ADD22:%.*]] = add i32 [[SUB21]], 1
21878 // CHECK20-NEXT:    [[DIV23:%.*]] = udiv i32 [[ADD22]], 1
21879 // CHECK20-NEXT:    [[MUL24:%.*]] = mul i32 [[DIV23]], 1
21880 // CHECK20-NEXT:    [[ADD25:%.*]] = add i32 [[TMP29]], [[MUL24]]
21881 // CHECK20-NEXT:    store i32 [[ADD25]], i32* [[I6]], align 4
21882 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21883 // CHECK20:       .omp.final.done:
21884 // CHECK20-NEXT:    br label [[OMP_PRECOND_END]]
21885 // CHECK20:       omp.precond.end:
21886 // CHECK20-NEXT:    ret void
21887 //
21888 //
21889 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
21890 // CHECK20-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
21891 // CHECK20-NEXT:  entry:
21892 // CHECK20-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
21893 // CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
21894 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
21895 // CHECK20-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
21896 // CHECK20-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
21897 // CHECK20-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
21898 // CHECK20-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
21899 // CHECK20-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
21900 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
21901 // CHECK20-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
21902 // CHECK20-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
21903 // CHECK20-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
21904 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
21905 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
21906 // CHECK20-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
21907 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
21908 // CHECK20-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
21909 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
21910 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
21911 // CHECK20-NEXT:    ret void
21912 //
21913 //
21914 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..5
21915 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] {
21916 // CHECK20-NEXT:  entry:
21917 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
21918 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
21919 // CHECK20-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
21920 // CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
21921 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
21922 // CHECK20-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
21923 // CHECK20-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
21924 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21925 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21926 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21927 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21928 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21929 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21930 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
21931 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
21932 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
21933 // CHECK20-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
21934 // CHECK20-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
21935 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
21936 // CHECK20-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
21937 // CHECK20-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
21938 // CHECK20-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
21939 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
21940 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
21941 // CHECK20-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
21942 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21943 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
21944 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21945 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21946 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
21947 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
21948 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21949 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21950 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
21951 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21952 // CHECK20:       cond.true:
21953 // CHECK20-NEXT:    br label [[COND_END:%.*]]
21954 // CHECK20:       cond.false:
21955 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21956 // CHECK20-NEXT:    br label [[COND_END]]
21957 // CHECK20:       cond.end:
21958 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
21959 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21960 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21961 // CHECK20-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
21962 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21963 // CHECK20:       omp.inner.for.cond:
21964 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
21965 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !31
21966 // CHECK20-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
21967 // CHECK20-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21968 // CHECK20:       omp.inner.for.body:
21969 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
21970 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
21971 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
21972 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !31
21973 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !31
21974 // CHECK20-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
21975 // CHECK20-NEXT:    [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00
21976 // CHECK20-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
21977 // CHECK20-NEXT:    store double [[ADD4]], double* [[A]], align 4, !llvm.access.group !31
21978 // CHECK20-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
21979 // CHECK20-NEXT:    [[TMP13:%.*]] = load double, double* [[A5]], align 4, !llvm.access.group !31
21980 // CHECK20-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
21981 // CHECK20-NEXT:    store double [[INC]], double* [[A5]], align 4, !llvm.access.group !31
21982 // CHECK20-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
21983 // CHECK20-NEXT:    [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
21984 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
21985 // CHECK20-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
21986 // CHECK20-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !31
21987 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21988 // CHECK20:       omp.body.continue:
21989 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21990 // CHECK20:       omp.inner.for.inc:
21991 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
21992 // CHECK20-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1
21993 // CHECK20-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
21994 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
21995 // CHECK20:       omp.inner.for.end:
21996 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21997 // CHECK20:       omp.loop.exit:
21998 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
21999 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22000 // CHECK20-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
22001 // CHECK20-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22002 // CHECK20:       .omp.final.then:
22003 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
22004 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22005 // CHECK20:       .omp.final.done:
22006 // CHECK20-NEXT:    ret void
22007 //
22008 //
22009 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
22010 // CHECK20-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
22011 // CHECK20-NEXT:  entry:
22012 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
22013 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
22014 // CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
22015 // CHECK20-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
22016 // CHECK20-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
22017 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
22018 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
22019 // CHECK20-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
22020 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
22021 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
22022 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
22023 // CHECK20-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
22024 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
22025 // CHECK20-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
22026 // CHECK20-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
22027 // CHECK20-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
22028 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
22029 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
22030 // CHECK20-NEXT:    ret void
22031 //
22032 //
22033 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..6
22034 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
22035 // CHECK20-NEXT:  entry:
22036 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22037 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22038 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
22039 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
22040 // CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
22041 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22042 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22043 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22044 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22045 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22046 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22047 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
22048 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22049 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22050 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
22051 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
22052 // CHECK20-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
22053 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
22054 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
22055 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22056 // CHECK20-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
22057 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22058 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22059 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22060 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
22061 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22062 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22063 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
22064 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22065 // CHECK20:       cond.true:
22066 // CHECK20-NEXT:    br label [[COND_END:%.*]]
22067 // CHECK20:       cond.false:
22068 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22069 // CHECK20-NEXT:    br label [[COND_END]]
22070 // CHECK20:       cond.end:
22071 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
22072 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
22073 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22074 // CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
22075 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22076 // CHECK20:       omp.inner.for.cond:
22077 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
22078 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !34
22079 // CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
22080 // CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22081 // CHECK20:       omp.inner.for.body:
22082 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
22083 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
22084 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22085 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !34
22086 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !34
22087 // CHECK20-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
22088 // CHECK20-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !34
22089 // CHECK20-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !34
22090 // CHECK20-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
22091 // CHECK20-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
22092 // CHECK20-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
22093 // CHECK20-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !34
22094 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
22095 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !34
22096 // CHECK20-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
22097 // CHECK20-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !34
22098 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22099 // CHECK20:       omp.body.continue:
22100 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22101 // CHECK20:       omp.inner.for.inc:
22102 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
22103 // CHECK20-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP12]], 1
22104 // CHECK20-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
22105 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
22106 // CHECK20:       omp.inner.for.end:
22107 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22108 // CHECK20:       omp.loop.exit:
22109 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
22110 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22111 // CHECK20-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
22112 // CHECK20-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22113 // CHECK20:       .omp.final.then:
22114 // CHECK20-NEXT:    store i32 10, i32* [[I]], align 4
22115 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22116 // CHECK20:       .omp.final.done:
22117 // CHECK20-NEXT:    ret void
22118 //
22119 //
22120 // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
22121 // CHECK21-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] {
22122 // CHECK21-NEXT:  entry:
22123 // CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
22124 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
22125 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8
22126 // CHECK21-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
22127 // CHECK21-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
22128 // CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
22129 // CHECK21-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
22130 // CHECK21-NEXT:    store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8
22131 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
22132 // CHECK21-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
22133 // CHECK21-NEXT:    [[CONV4:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32*
22134 // CHECK21-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV3]], align 4
22135 // CHECK21-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV4]], align 4
22136 // CHECK21-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
22137 // CHECK21-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
22138 // CHECK21-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
22139 // CHECK21-NEXT:    store i16 [[TMP3]], i16* [[CONV5]], align 2
22140 // CHECK21-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
22141 // CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP4]])
22142 // CHECK21-NEXT:    ret void
22143 //
22144 //
22145 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined.
22146 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
22147 // CHECK21-NEXT:  entry:
22148 // CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
22149 // CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
22150 // CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
22151 // CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22152 // CHECK21-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22153 // CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22154 // CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22155 // CHECK21-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22156 // CHECK21-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22157 // CHECK21-NEXT:    [[I:%.*]] = alloca i32, align 4
22158 // CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
22159 // CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
22160 // CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
22161 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
22162 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22163 // CHECK21-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
22164 // CHECK21-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22165 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22166 // CHECK21-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22167 // CHECK21-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
22168 // CHECK21-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22169 // CHECK21-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22170 // CHECK21-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
22171 // CHECK21-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22172 // CHECK21:       cond.true:
22173 // CHECK21-NEXT:    br label [[COND_END:%.*]]
22174 // CHECK21:       cond.false:
22175 // CHECK21-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22176 // CHECK21-NEXT:    br label [[COND_END]]
22177 // CHECK21:       cond.end:
22178 // CHECK21-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
22179 // CHECK21-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
22180 // CHECK21-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22181 // CHECK21-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
22182 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22183 // CHECK21:       omp.inner.for.cond:
22184 // CHECK21-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
22185 // CHECK21-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
22186 // CHECK21-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
22187 // CHECK21-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22188 // CHECK21:       omp.inner.for.body:
22189 // CHECK21-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
22190 // CHECK21-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
22191 // CHECK21-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22192 // CHECK21-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
22193 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22194 // CHECK21:       omp.body.continue:
22195 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22196 // CHECK21:       omp.inner.for.inc:
22197 // CHECK21-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
22198 // CHECK21-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
22199 // CHECK21-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
22200 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
22201 // CHECK21:       omp.inner.for.end:
22202 // CHECK21-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22203 // CHECK21:       omp.loop.exit:
22204 // CHECK21-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
22205 // CHECK21-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22206 // CHECK21-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
22207 // CHECK21-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22208 // CHECK21:       .omp.final.then:
22209 // CHECK21-NEXT:    store i32 10, i32* [[I]], align 4
22210 // CHECK21-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22211 // CHECK21:       .omp.final.done:
22212 // CHECK21-NEXT:    ret void
22213 //
22214 //
22215 // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
22216 // CHECK21-SAME: (i64 noundef [[AA:%.*]]) #[[ATTR0]] {
22217 // CHECK21-NEXT:  entry:
22218 // CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
22219 // CHECK21-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
22220 // CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
22221 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
22222 // CHECK21-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
22223 // CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
22224 // CHECK21-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
22225 // CHECK21-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
22226 // CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]])
22227 // CHECK21-NEXT:    ret void
22228 //
22229 //
22230 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..1
22231 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] {
22232 // CHECK21-NEXT:  entry:
22233 // CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
22234 // CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
22235 // CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
22236 // CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22237 // CHECK21-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22238 // CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22239 // CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22240 // CHECK21-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22241 // CHECK21-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22242 // CHECK21-NEXT:    [[I:%.*]] = alloca i32, align 4
22243 // CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
22244 // CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
22245 // CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
22246 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
22247 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22248 // CHECK21-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
22249 // CHECK21-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22250 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22251 // CHECK21-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22252 // CHECK21-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
22253 // CHECK21-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22254 // CHECK21-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22255 // CHECK21-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
22256 // CHECK21-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22257 // CHECK21:       cond.true:
22258 // CHECK21-NEXT:    br label [[COND_END:%.*]]
22259 // CHECK21:       cond.false:
22260 // CHECK21-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22261 // CHECK21-NEXT:    br label [[COND_END]]
22262 // CHECK21:       cond.end:
22263 // CHECK21-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
22264 // CHECK21-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
22265 // CHECK21-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22266 // CHECK21-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
22267 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22268 // CHECK21:       omp.inner.for.cond:
22269 // CHECK21-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
22270 // CHECK21-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
22271 // CHECK21-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
22272 // CHECK21-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22273 // CHECK21:       omp.inner.for.body:
22274 // CHECK21-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
22275 // CHECK21-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
22276 // CHECK21-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22277 // CHECK21-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !18
22278 // CHECK21-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !18
22279 // CHECK21-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
22280 // CHECK21-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
22281 // CHECK21-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
22282 // CHECK21-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !18
22283 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22284 // CHECK21:       omp.body.continue:
22285 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22286 // CHECK21:       omp.inner.for.inc:
22287 // CHECK21-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
22288 // CHECK21-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
22289 // CHECK21-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
22290 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
22291 // CHECK21:       omp.inner.for.end:
22292 // CHECK21-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22293 // CHECK21:       omp.loop.exit:
22294 // CHECK21-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
22295 // CHECK21-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22296 // CHECK21-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
22297 // CHECK21-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22298 // CHECK21:       .omp.final.then:
22299 // CHECK21-NEXT:    store i32 10, i32* [[I]], align 4
22300 // CHECK21-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22301 // CHECK21:       .omp.final.done:
22302 // CHECK21-NEXT:    ret void
22303 //
22304 //
22305 // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
22306 // CHECK21-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR0]] {
22307 // CHECK21-NEXT:  entry:
22308 // CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
22309 // CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
22310 // CHECK21-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
22311 // CHECK21-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
22312 // CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
22313 // CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
22314 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
22315 // CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
22316 // CHECK21-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
22317 // CHECK21-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
22318 // CHECK21-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
22319 // CHECK21-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
22320 // CHECK21-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
22321 // CHECK21-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
22322 // CHECK21-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
22323 // CHECK21-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
22324 // CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
22325 // CHECK21-NEXT:    ret void
22326 //
22327 //
22328 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..2
22329 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] {
22330 // CHECK21-NEXT:  entry:
22331 // CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
22332 // CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
22333 // CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
22334 // CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
22335 // CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22336 // CHECK21-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22337 // CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22338 // CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22339 // CHECK21-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22340 // CHECK21-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22341 // CHECK21-NEXT:    [[I:%.*]] = alloca i32, align 4
22342 // CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
22343 // CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
22344 // CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
22345 // CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
22346 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
22347 // CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
22348 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22349 // CHECK21-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
22350 // CHECK21-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22351 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22352 // CHECK21-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22353 // CHECK21-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
22354 // CHECK21-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22355 // CHECK21-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22356 // CHECK21-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
22357 // CHECK21-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22358 // CHECK21:       cond.true:
22359 // CHECK21-NEXT:    br label [[COND_END:%.*]]
22360 // CHECK21:       cond.false:
22361 // CHECK21-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22362 // CHECK21-NEXT:    br label [[COND_END]]
22363 // CHECK21:       cond.end:
22364 // CHECK21-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
22365 // CHECK21-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
22366 // CHECK21-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22367 // CHECK21-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
22368 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22369 // CHECK21:       omp.inner.for.cond:
22370 // CHECK21-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
22371 // CHECK21-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
22372 // CHECK21-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
22373 // CHECK21-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22374 // CHECK21:       omp.inner.for.body:
22375 // CHECK21-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
22376 // CHECK21-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
22377 // CHECK21-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22378 // CHECK21-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !21
22379 // CHECK21-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !21
22380 // CHECK21-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
22381 // CHECK21-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !21
22382 // CHECK21-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !21
22383 // CHECK21-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
22384 // CHECK21-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
22385 // CHECK21-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
22386 // CHECK21-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !21
22387 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22388 // CHECK21:       omp.body.continue:
22389 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22390 // CHECK21:       omp.inner.for.inc:
22391 // CHECK21-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
22392 // CHECK21-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
22393 // CHECK21-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
22394 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
22395 // CHECK21:       omp.inner.for.end:
22396 // CHECK21-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22397 // CHECK21:       omp.loop.exit:
22398 // CHECK21-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
22399 // CHECK21-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22400 // CHECK21-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
22401 // CHECK21-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22402 // CHECK21:       .omp.final.then:
22403 // CHECK21-NEXT:    store i32 10, i32* [[I]], align 4
22404 // CHECK21-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22405 // CHECK21:       .omp.final.done:
22406 // CHECK21-NEXT:    ret void
22407 //
22408 //
22409 // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
22410 // CHECK21-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR0]] {
22411 // CHECK21-NEXT:  entry:
22412 // CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
22413 // CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
22414 // CHECK21-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
22415 // CHECK21-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
22416 // CHECK21-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
22417 // CHECK21-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
22418 // CHECK21-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
22419 // CHECK21-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
22420 // CHECK21-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
22421 // CHECK21-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
22422 // CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
22423 // CHECK21-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
22424 // CHECK21-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
22425 // CHECK21-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
22426 // CHECK21-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
22427 // CHECK21-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
22428 // CHECK21-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
22429 // CHECK21-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
22430 // CHECK21-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
22431 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
22432 // CHECK21-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
22433 // CHECK21-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
22434 // CHECK21-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
22435 // CHECK21-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
22436 // CHECK21-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
22437 // CHECK21-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
22438 // CHECK21-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
22439 // CHECK21-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
22440 // CHECK21-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
22441 // CHECK21-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
22442 // CHECK21-NEXT:    store i32 [[TMP8]], i32* [[CONV5]], align 4
22443 // CHECK21-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
22444 // CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
22445 // CHECK21-NEXT:    ret void
22446 //
22447 //
22448 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..3
22449 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR1]] {
22450 // CHECK21-NEXT:  entry:
22451 // CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
22452 // CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
22453 // CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
22454 // CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
22455 // CHECK21-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
22456 // CHECK21-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
22457 // CHECK21-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
22458 // CHECK21-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
22459 // CHECK21-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
22460 // CHECK21-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
22461 // CHECK21-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
22462 // CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22463 // CHECK21-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22464 // CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22465 // CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22466 // CHECK21-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22467 // CHECK21-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22468 // CHECK21-NEXT:    [[I:%.*]] = alloca i32, align 4
22469 // CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
22470 // CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
22471 // CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
22472 // CHECK21-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
22473 // CHECK21-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
22474 // CHECK21-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
22475 // CHECK21-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
22476 // CHECK21-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
22477 // CHECK21-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
22478 // CHECK21-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
22479 // CHECK21-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
22480 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
22481 // CHECK21-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
22482 // CHECK21-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
22483 // CHECK21-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
22484 // CHECK21-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
22485 // CHECK21-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
22486 // CHECK21-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
22487 // CHECK21-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
22488 // CHECK21-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
22489 // CHECK21-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 0
22490 // CHECK21-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
22491 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22492 // CHECK21-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
22493 // CHECK21-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22494 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22495 // CHECK21-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22496 // CHECK21-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
22497 // CHECK21-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22498 // CHECK21-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22499 // CHECK21-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
22500 // CHECK21-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22501 // CHECK21:       cond.true:
22502 // CHECK21-NEXT:    br label [[COND_END:%.*]]
22503 // CHECK21:       cond.false:
22504 // CHECK21-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22505 // CHECK21-NEXT:    br label [[COND_END]]
22506 // CHECK21:       cond.end:
22507 // CHECK21-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
22508 // CHECK21-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
22509 // CHECK21-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22510 // CHECK21-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
22511 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22512 // CHECK21:       omp.inner.for.cond:
22513 // CHECK21-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
22514 // CHECK21-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
22515 // CHECK21-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
22516 // CHECK21-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22517 // CHECK21:       omp.inner.for.body:
22518 // CHECK21-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
22519 // CHECK21-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
22520 // CHECK21-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22521 // CHECK21-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24
22522 // CHECK21-NEXT:    [[TMP16:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !24
22523 // CHECK21-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
22524 // CHECK21-NEXT:    store i32 [[ADD6]], i32* [[CONV]], align 4, !llvm.access.group !24
22525 // CHECK21-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
22526 // CHECK21-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !24
22527 // CHECK21-NEXT:    [[CONV7:%.*]] = fpext float [[TMP17]] to double
22528 // CHECK21-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
22529 // CHECK21-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
22530 // CHECK21-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !24
22531 // CHECK21-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
22532 // CHECK21-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
22533 // CHECK21-NEXT:    [[CONV11:%.*]] = fpext float [[TMP18]] to double
22534 // CHECK21-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
22535 // CHECK21-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
22536 // CHECK21-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
22537 // CHECK21-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
22538 // CHECK21-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i64 0, i64 2
22539 // CHECK21-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
22540 // CHECK21-NEXT:    [[ADD16:%.*]] = fadd double [[TMP19]], 1.000000e+00
22541 // CHECK21-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
22542 // CHECK21-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP5]]
22543 // CHECK21-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP20]]
22544 // CHECK21-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i64 3
22545 // CHECK21-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
22546 // CHECK21-NEXT:    [[ADD19:%.*]] = fadd double [[TMP21]], 1.000000e+00
22547 // CHECK21-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
22548 // CHECK21-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
22549 // CHECK21-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !24
22550 // CHECK21-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP22]], 1
22551 // CHECK21-NEXT:    store i64 [[ADD20]], i64* [[X]], align 8, !llvm.access.group !24
22552 // CHECK21-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
22553 // CHECK21-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !24
22554 // CHECK21-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP23]] to i32
22555 // CHECK21-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
22556 // CHECK21-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
22557 // CHECK21-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 8, !llvm.access.group !24
22558 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22559 // CHECK21:       omp.body.continue:
22560 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22561 // CHECK21:       omp.inner.for.inc:
22562 // CHECK21-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
22563 // CHECK21-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP24]], 1
22564 // CHECK21-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
22565 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
22566 // CHECK21:       omp.inner.for.end:
22567 // CHECK21-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22568 // CHECK21:       omp.loop.exit:
22569 // CHECK21-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
22570 // CHECK21-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22571 // CHECK21-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
22572 // CHECK21-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22573 // CHECK21:       .omp.final.then:
22574 // CHECK21-NEXT:    store i32 10, i32* [[I]], align 4
22575 // CHECK21-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22576 // CHECK21:       .omp.final.done:
22577 // CHECK21-NEXT:    ret void
22578 //
22579 //
22580 // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
22581 // CHECK21-SAME: (i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
22582 // CHECK21-NEXT:  entry:
22583 // CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
22584 // CHECK21-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
22585 // CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
22586 // CHECK21-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
22587 // CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
22588 // CHECK21-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
22589 // CHECK21-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
22590 // CHECK21-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
22591 // CHECK21-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
22592 // CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
22593 // CHECK21-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
22594 // CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
22595 // CHECK21-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
22596 // CHECK21-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
22597 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
22598 // CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
22599 // CHECK21-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
22600 // CHECK21-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
22601 // CHECK21-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
22602 // CHECK21-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
22603 // CHECK21-NEXT:    [[CONV4:%.*]] = bitcast i64* [[A_CASTED]] to i32*
22604 // CHECK21-NEXT:    store i32 [[TMP1]], i32* [[CONV4]], align 4
22605 // CHECK21-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
22606 // CHECK21-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV1]], align 4
22607 // CHECK21-NEXT:    [[CONV5:%.*]] = bitcast i64* [[N_CASTED]] to i32*
22608 // CHECK21-NEXT:    store i32 [[TMP3]], i32* [[CONV5]], align 4
22609 // CHECK21-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
22610 // CHECK21-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV2]], align 2
22611 // CHECK21-NEXT:    [[CONV6:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
22612 // CHECK21-NEXT:    store i16 [[TMP5]], i16* [[CONV6]], align 2
22613 // CHECK21-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AA_CASTED]], align 8
22614 // CHECK21-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 1
22615 // CHECK21-NEXT:    [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
22616 // CHECK21-NEXT:    store i8 [[TMP7]], i8* [[CONV7]], align 1
22617 // CHECK21-NEXT:    [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
22618 // CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]])
22619 // CHECK21-NEXT:    ret void
22620 //
22621 //
22622 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..4
22623 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
22624 // CHECK21-NEXT:  entry:
22625 // CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
22626 // CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
22627 // CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
22628 // CHECK21-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
22629 // CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
22630 // CHECK21-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
22631 // CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
22632 // CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22633 // CHECK21-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22634 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
22635 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
22636 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
22637 // CHECK21-NEXT:    [[I:%.*]] = alloca i32, align 4
22638 // CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22639 // CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22640 // CHECK21-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22641 // CHECK21-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22642 // CHECK21-NEXT:    [[I8:%.*]] = alloca i32, align 4
22643 // CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
22644 // CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
22645 // CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
22646 // CHECK21-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
22647 // CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
22648 // CHECK21-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
22649 // CHECK21-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
22650 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
22651 // CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
22652 // CHECK21-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
22653 // CHECK21-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
22654 // CHECK21-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
22655 // CHECK21-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
22656 // CHECK21-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
22657 // CHECK21-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
22658 // CHECK21-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_4]], align 4
22659 // CHECK21-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
22660 // CHECK21-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22661 // CHECK21-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
22662 // CHECK21-NEXT:    [[SUB6:%.*]] = sub i32 [[SUB]], 1
22663 // CHECK21-NEXT:    [[ADD:%.*]] = add i32 [[SUB6]], 1
22664 // CHECK21-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
22665 // CHECK21-NEXT:    [[SUB7:%.*]] = sub i32 [[DIV]], 1
22666 // CHECK21-NEXT:    store i32 [[SUB7]], i32* [[DOTCAPTURE_EXPR_5]], align 4
22667 // CHECK21-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22668 // CHECK21-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
22669 // CHECK21-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22670 // CHECK21-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
22671 // CHECK21-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
22672 // CHECK21-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
22673 // CHECK21:       omp.precond.then:
22674 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22675 // CHECK21-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
22676 // CHECK21-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
22677 // CHECK21-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22678 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22679 // CHECK21-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22680 // CHECK21-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
22681 // CHECK21-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22682 // CHECK21-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22683 // CHECK21-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
22684 // CHECK21-NEXT:    [[CMP9:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
22685 // CHECK21-NEXT:    br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22686 // CHECK21:       cond.true:
22687 // CHECK21-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
22688 // CHECK21-NEXT:    br label [[COND_END:%.*]]
22689 // CHECK21:       cond.false:
22690 // CHECK21-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22691 // CHECK21-NEXT:    br label [[COND_END]]
22692 // CHECK21:       cond.end:
22693 // CHECK21-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
22694 // CHECK21-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
22695 // CHECK21-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22696 // CHECK21-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
22697 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22698 // CHECK21:       omp.inner.for.cond:
22699 // CHECK21-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
22700 // CHECK21-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
22701 // CHECK21-NEXT:    [[ADD10:%.*]] = add i32 [[TMP17]], 1
22702 // CHECK21-NEXT:    [[CMP11:%.*]] = icmp ult i32 [[TMP16]], [[ADD10]]
22703 // CHECK21-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22704 // CHECK21:       omp.inner.for.body:
22705 // CHECK21-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !27
22706 // CHECK21-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
22707 // CHECK21-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
22708 // CHECK21-NEXT:    [[ADD12:%.*]] = add i32 [[TMP18]], [[MUL]]
22709 // CHECK21-NEXT:    store i32 [[ADD12]], i32* [[I8]], align 4, !llvm.access.group !27
22710 // CHECK21-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !27
22711 // CHECK21-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP20]], 1
22712 // CHECK21-NEXT:    store i32 [[ADD13]], i32* [[CONV]], align 4, !llvm.access.group !27
22713 // CHECK21-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV2]], align 2, !llvm.access.group !27
22714 // CHECK21-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP21]] to i32
22715 // CHECK21-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
22716 // CHECK21-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
22717 // CHECK21-NEXT:    store i16 [[CONV16]], i16* [[CONV2]], align 2, !llvm.access.group !27
22718 // CHECK21-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV3]], align 1, !llvm.access.group !27
22719 // CHECK21-NEXT:    [[CONV17:%.*]] = sext i8 [[TMP22]] to i32
22720 // CHECK21-NEXT:    [[ADD18:%.*]] = add nsw i32 [[CONV17]], 1
22721 // CHECK21-NEXT:    [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
22722 // CHECK21-NEXT:    store i8 [[CONV19]], i8* [[CONV3]], align 1, !llvm.access.group !27
22723 // CHECK21-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
22724 // CHECK21-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
22725 // CHECK21-NEXT:    [[ADD20:%.*]] = add nsw i32 [[TMP23]], 1
22726 // CHECK21-NEXT:    store i32 [[ADD20]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
22727 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22728 // CHECK21:       omp.body.continue:
22729 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22730 // CHECK21:       omp.inner.for.inc:
22731 // CHECK21-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
22732 // CHECK21-NEXT:    [[ADD21:%.*]] = add i32 [[TMP24]], 1
22733 // CHECK21-NEXT:    store i32 [[ADD21]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
22734 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
22735 // CHECK21:       omp.inner.for.end:
22736 // CHECK21-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22737 // CHECK21:       omp.loop.exit:
22738 // CHECK21-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22739 // CHECK21-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
22740 // CHECK21-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
22741 // CHECK21-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22742 // CHECK21-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
22743 // CHECK21-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22744 // CHECK21:       .omp.final.then:
22745 // CHECK21-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22746 // CHECK21-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
22747 // CHECK21-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
22748 // CHECK21-NEXT:    [[SUB22:%.*]] = sub i32 [[TMP30]], [[TMP31]]
22749 // CHECK21-NEXT:    [[SUB23:%.*]] = sub i32 [[SUB22]], 1
22750 // CHECK21-NEXT:    [[ADD24:%.*]] = add i32 [[SUB23]], 1
22751 // CHECK21-NEXT:    [[DIV25:%.*]] = udiv i32 [[ADD24]], 1
22752 // CHECK21-NEXT:    [[MUL26:%.*]] = mul i32 [[DIV25]], 1
22753 // CHECK21-NEXT:    [[ADD27:%.*]] = add i32 [[TMP29]], [[MUL26]]
22754 // CHECK21-NEXT:    store i32 [[ADD27]], i32* [[I8]], align 4
22755 // CHECK21-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22756 // CHECK21:       .omp.final.done:
22757 // CHECK21-NEXT:    br label [[OMP_PRECOND_END]]
22758 // CHECK21:       omp.precond.end:
22759 // CHECK21-NEXT:    ret void
22760 //
22761 //
22762 // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
22763 // CHECK21-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
22764 // CHECK21-NEXT:  entry:
22765 // CHECK21-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
22766 // CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
22767 // CHECK21-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
22768 // CHECK21-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
22769 // CHECK21-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
22770 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
22771 // CHECK21-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
22772 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
22773 // CHECK21-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
22774 // CHECK21-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
22775 // CHECK21-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
22776 // CHECK21-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
22777 // CHECK21-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
22778 // CHECK21-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
22779 // CHECK21-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
22780 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
22781 // CHECK21-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
22782 // CHECK21-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
22783 // CHECK21-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
22784 // CHECK21-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
22785 // CHECK21-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
22786 // CHECK21-NEXT:    [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i32*
22787 // CHECK21-NEXT:    store i32 [[TMP4]], i32* [[CONV4]], align 4
22788 // CHECK21-NEXT:    [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
22789 // CHECK21-NEXT:    [[TMP6:%.*]] = load i8, i8* [[CONV3]], align 1
22790 // CHECK21-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
22791 // CHECK21-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
22792 // CHECK21-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
22793 // CHECK21-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV5]], align 1
22794 // CHECK21-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
22795 // CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]], i64 [[TMP7]])
22796 // CHECK21-NEXT:    ret void
22797 //
22798 //
22799 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..5
22800 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
22801 // CHECK21-NEXT:  entry:
22802 // CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
22803 // CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
22804 // CHECK21-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
22805 // CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
22806 // CHECK21-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
22807 // CHECK21-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
22808 // CHECK21-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
22809 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
22810 // CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22811 // CHECK21-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22812 // CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22813 // CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22814 // CHECK21-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22815 // CHECK21-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22816 // CHECK21-NEXT:    [[I:%.*]] = alloca i32, align 4
22817 // CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
22818 // CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
22819 // CHECK21-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
22820 // CHECK21-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
22821 // CHECK21-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
22822 // CHECK21-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
22823 // CHECK21-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
22824 // CHECK21-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
22825 // CHECK21-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
22826 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
22827 // CHECK21-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
22828 // CHECK21-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
22829 // CHECK21-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
22830 // CHECK21-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
22831 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22832 // CHECK21-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
22833 // CHECK21-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22834 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22835 // CHECK21-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22836 // CHECK21-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
22837 // CHECK21-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22838 // CHECK21-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22839 // CHECK21-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
22840 // CHECK21-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22841 // CHECK21:       cond.true:
22842 // CHECK21-NEXT:    br label [[COND_END:%.*]]
22843 // CHECK21:       cond.false:
22844 // CHECK21-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22845 // CHECK21-NEXT:    br label [[COND_END]]
22846 // CHECK21:       cond.end:
22847 // CHECK21-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
22848 // CHECK21-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
22849 // CHECK21-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22850 // CHECK21-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
22851 // CHECK21-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV3]], align 1
22852 // CHECK21-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1
22853 // CHECK21-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
22854 // CHECK21:       omp_if.then:
22855 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22856 // CHECK21:       omp.inner.for.cond:
22857 // CHECK21-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
22858 // CHECK21-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !30
22859 // CHECK21-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
22860 // CHECK21-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22861 // CHECK21:       omp.inner.for.body:
22862 // CHECK21-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
22863 // CHECK21-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
22864 // CHECK21-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
22865 // CHECK21-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !30
22866 // CHECK21-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !30
22867 // CHECK21-NEXT:    [[CONV5:%.*]] = sitofp i32 [[TMP13]] to double
22868 // CHECK21-NEXT:    [[ADD6:%.*]] = fadd double [[CONV5]], 1.500000e+00
22869 // CHECK21-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
22870 // CHECK21-NEXT:    store double [[ADD6]], double* [[A]], align 8, !llvm.access.group !30
22871 // CHECK21-NEXT:    [[A7:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
22872 // CHECK21-NEXT:    [[TMP14:%.*]] = load double, double* [[A7]], align 8, !llvm.access.group !30
22873 // CHECK21-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
22874 // CHECK21-NEXT:    store double [[INC]], double* [[A7]], align 8, !llvm.access.group !30
22875 // CHECK21-NEXT:    [[CONV8:%.*]] = fptosi double [[INC]] to i16
22876 // CHECK21-NEXT:    [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]]
22877 // CHECK21-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP15]]
22878 // CHECK21-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
22879 // CHECK21-NEXT:    store i16 [[CONV8]], i16* [[ARRAYIDX9]], align 2, !llvm.access.group !30
22880 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22881 // CHECK21:       omp.body.continue:
22882 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22883 // CHECK21:       omp.inner.for.inc:
22884 // CHECK21-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
22885 // CHECK21-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
22886 // CHECK21-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
22887 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
22888 // CHECK21:       omp.inner.for.end:
22889 // CHECK21-NEXT:    br label [[OMP_IF_END:%.*]]
22890 // CHECK21:       omp_if.else:
22891 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND11:%.*]]
22892 // CHECK21:       omp.inner.for.cond11:
22893 // CHECK21-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22894 // CHECK21-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22895 // CHECK21-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
22896 // CHECK21-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY13:%.*]], label [[OMP_INNER_FOR_END27:%.*]]
22897 // CHECK21:       omp.inner.for.body13:
22898 // CHECK21-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22899 // CHECK21-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[TMP19]], 1
22900 // CHECK21-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
22901 // CHECK21-NEXT:    store i32 [[ADD15]], i32* [[I]], align 4
22902 // CHECK21-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 4
22903 // CHECK21-NEXT:    [[CONV16:%.*]] = sitofp i32 [[TMP20]] to double
22904 // CHECK21-NEXT:    [[ADD17:%.*]] = fadd double [[CONV16]], 1.500000e+00
22905 // CHECK21-NEXT:    [[A18:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
22906 // CHECK21-NEXT:    store double [[ADD17]], double* [[A18]], align 8
22907 // CHECK21-NEXT:    [[A19:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
22908 // CHECK21-NEXT:    [[TMP21:%.*]] = load double, double* [[A19]], align 8
22909 // CHECK21-NEXT:    [[INC20:%.*]] = fadd double [[TMP21]], 1.000000e+00
22910 // CHECK21-NEXT:    store double [[INC20]], double* [[A19]], align 8
22911 // CHECK21-NEXT:    [[CONV21:%.*]] = fptosi double [[INC20]] to i16
22912 // CHECK21-NEXT:    [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]]
22913 // CHECK21-NEXT:    [[ARRAYIDX22:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP22]]
22914 // CHECK21-NEXT:    [[ARRAYIDX23:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX22]], i64 1
22915 // CHECK21-NEXT:    store i16 [[CONV21]], i16* [[ARRAYIDX23]], align 2
22916 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE24:%.*]]
22917 // CHECK21:       omp.body.continue24:
22918 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC25:%.*]]
22919 // CHECK21:       omp.inner.for.inc25:
22920 // CHECK21-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22921 // CHECK21-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP23]], 1
22922 // CHECK21-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4
22923 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND11]], !llvm.loop [[LOOP33:![0-9]+]]
22924 // CHECK21:       omp.inner.for.end27:
22925 // CHECK21-NEXT:    br label [[OMP_IF_END]]
22926 // CHECK21:       omp_if.end:
22927 // CHECK21-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22928 // CHECK21:       omp.loop.exit:
22929 // CHECK21-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
22930 // CHECK21-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22931 // CHECK21-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
22932 // CHECK21-NEXT:    br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22933 // CHECK21:       .omp.final.then:
22934 // CHECK21-NEXT:    store i32 10, i32* [[I]], align 4
22935 // CHECK21-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22936 // CHECK21:       .omp.final.done:
22937 // CHECK21-NEXT:    ret void
22938 //
22939 //
22940 // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
22941 // CHECK21-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
22942 // CHECK21-NEXT:  entry:
22943 // CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
22944 // CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
22945 // CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
22946 // CHECK21-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
22947 // CHECK21-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
22948 // CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
22949 // CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
22950 // CHECK21-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
22951 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
22952 // CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
22953 // CHECK21-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
22954 // CHECK21-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
22955 // CHECK21-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
22956 // CHECK21-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
22957 // CHECK21-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
22958 // CHECK21-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
22959 // CHECK21-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
22960 // CHECK21-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
22961 // CHECK21-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
22962 // CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
22963 // CHECK21-NEXT:    ret void
22964 //
22965 //
22966 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..6
22967 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
22968 // CHECK21-NEXT:  entry:
22969 // CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
22970 // CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
22971 // CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
22972 // CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
22973 // CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
22974 // CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22975 // CHECK21-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22976 // CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22977 // CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22978 // CHECK21-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22979 // CHECK21-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22980 // CHECK21-NEXT:    [[I:%.*]] = alloca i32, align 4
22981 // CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
22982 // CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
22983 // CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
22984 // CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
22985 // CHECK21-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
22986 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
22987 // CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
22988 // CHECK21-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
22989 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22990 // CHECK21-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
22991 // CHECK21-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22992 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22993 // CHECK21-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
22994 // CHECK21-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
22995 // CHECK21-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22996 // CHECK21-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22997 // CHECK21-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
22998 // CHECK21-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22999 // CHECK21:       cond.true:
23000 // CHECK21-NEXT:    br label [[COND_END:%.*]]
23001 // CHECK21:       cond.false:
23002 // CHECK21-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23003 // CHECK21-NEXT:    br label [[COND_END]]
23004 // CHECK21:       cond.end:
23005 // CHECK21-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
23006 // CHECK21-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23007 // CHECK21-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23008 // CHECK21-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
23009 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23010 // CHECK21:       omp.inner.for.cond:
23011 // CHECK21-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
23012 // CHECK21-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
23013 // CHECK21-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
23014 // CHECK21-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23015 // CHECK21:       omp.inner.for.body:
23016 // CHECK21-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
23017 // CHECK21-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
23018 // CHECK21-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23019 // CHECK21-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !35
23020 // CHECK21-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !35
23021 // CHECK21-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
23022 // CHECK21-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !35
23023 // CHECK21-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !35
23024 // CHECK21-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
23025 // CHECK21-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
23026 // CHECK21-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
23027 // CHECK21-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !35
23028 // CHECK21-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
23029 // CHECK21-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !35
23030 // CHECK21-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
23031 // CHECK21-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !35
23032 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23033 // CHECK21:       omp.body.continue:
23034 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23035 // CHECK21:       omp.inner.for.inc:
23036 // CHECK21-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
23037 // CHECK21-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP12]], 1
23038 // CHECK21-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
23039 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
23040 // CHECK21:       omp.inner.for.end:
23041 // CHECK21-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23042 // CHECK21:       omp.loop.exit:
23043 // CHECK21-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
23044 // CHECK21-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23045 // CHECK21-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
23046 // CHECK21-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23047 // CHECK21:       .omp.final.then:
23048 // CHECK21-NEXT:    store i32 10, i32* [[I]], align 4
23049 // CHECK21-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23050 // CHECK21:       .omp.final.done:
23051 // CHECK21-NEXT:    ret void
23052 //
23053 //
23054 // CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
23055 // CHECK22-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] {
23056 // CHECK22-NEXT:  entry:
23057 // CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
23058 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
23059 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8
23060 // CHECK22-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
23061 // CHECK22-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
23062 // CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
23063 // CHECK22-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
23064 // CHECK22-NEXT:    store i64 [[DOTCAPTURE_EXPR_1]], i64* [[DOTCAPTURE_EXPR__ADDR2]], align 8
23065 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
23066 // CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
23067 // CHECK22-NEXT:    [[CONV4:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR2]] to i32*
23068 // CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV3]], align 4
23069 // CHECK22-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV4]], align 4
23070 // CHECK22-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
23071 // CHECK22-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
23072 // CHECK22-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
23073 // CHECK22-NEXT:    store i16 [[TMP3]], i16* [[CONV5]], align 2
23074 // CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
23075 // CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP4]])
23076 // CHECK22-NEXT:    ret void
23077 //
23078 //
23079 // CHECK22-LABEL: define {{[^@]+}}@.omp_outlined.
23080 // CHECK22-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
23081 // CHECK22-NEXT:  entry:
23082 // CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
23083 // CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
23084 // CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
23085 // CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23086 // CHECK22-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23087 // CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23088 // CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23089 // CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23090 // CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23091 // CHECK22-NEXT:    [[I:%.*]] = alloca i32, align 4
23092 // CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23093 // CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23094 // CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
23095 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
23096 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23097 // CHECK22-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
23098 // CHECK22-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23099 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23100 // CHECK22-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23101 // CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
23102 // CHECK22-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23103 // CHECK22-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23104 // CHECK22-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
23105 // CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23106 // CHECK22:       cond.true:
23107 // CHECK22-NEXT:    br label [[COND_END:%.*]]
23108 // CHECK22:       cond.false:
23109 // CHECK22-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23110 // CHECK22-NEXT:    br label [[COND_END]]
23111 // CHECK22:       cond.end:
23112 // CHECK22-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
23113 // CHECK22-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23114 // CHECK22-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23115 // CHECK22-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
23116 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23117 // CHECK22:       omp.inner.for.cond:
23118 // CHECK22-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
23119 // CHECK22-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
23120 // CHECK22-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
23121 // CHECK22-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23122 // CHECK22:       omp.inner.for.body:
23123 // CHECK22-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
23124 // CHECK22-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
23125 // CHECK22-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23126 // CHECK22-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
23127 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23128 // CHECK22:       omp.body.continue:
23129 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23130 // CHECK22:       omp.inner.for.inc:
23131 // CHECK22-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
23132 // CHECK22-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
23133 // CHECK22-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
23134 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
23135 // CHECK22:       omp.inner.for.end:
23136 // CHECK22-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23137 // CHECK22:       omp.loop.exit:
23138 // CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
23139 // CHECK22-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23140 // CHECK22-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
23141 // CHECK22-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23142 // CHECK22:       .omp.final.then:
23143 // CHECK22-NEXT:    store i32 10, i32* [[I]], align 4
23144 // CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23145 // CHECK22:       .omp.final.done:
23146 // CHECK22-NEXT:    ret void
23147 //
23148 //
23149 // CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
23150 // CHECK22-SAME: (i64 noundef [[AA:%.*]]) #[[ATTR0]] {
23151 // CHECK22-NEXT:  entry:
23152 // CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
23153 // CHECK22-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
23154 // CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
23155 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
23156 // CHECK22-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
23157 // CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
23158 // CHECK22-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
23159 // CHECK22-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
23160 // CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]])
23161 // CHECK22-NEXT:    ret void
23162 //
23163 //
23164 // CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..1
23165 // CHECK22-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] {
23166 // CHECK22-NEXT:  entry:
23167 // CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
23168 // CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
23169 // CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
23170 // CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23171 // CHECK22-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23172 // CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23173 // CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23174 // CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23175 // CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23176 // CHECK22-NEXT:    [[I:%.*]] = alloca i32, align 4
23177 // CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23178 // CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23179 // CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
23180 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
23181 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23182 // CHECK22-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
23183 // CHECK22-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23184 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23185 // CHECK22-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23186 // CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
23187 // CHECK22-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23188 // CHECK22-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23189 // CHECK22-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
23190 // CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23191 // CHECK22:       cond.true:
23192 // CHECK22-NEXT:    br label [[COND_END:%.*]]
23193 // CHECK22:       cond.false:
23194 // CHECK22-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23195 // CHECK22-NEXT:    br label [[COND_END]]
23196 // CHECK22:       cond.end:
23197 // CHECK22-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
23198 // CHECK22-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23199 // CHECK22-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23200 // CHECK22-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
23201 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23202 // CHECK22:       omp.inner.for.cond:
23203 // CHECK22-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
23204 // CHECK22-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
23205 // CHECK22-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
23206 // CHECK22-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23207 // CHECK22:       omp.inner.for.body:
23208 // CHECK22-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
23209 // CHECK22-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
23210 // CHECK22-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23211 // CHECK22-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !18
23212 // CHECK22-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !18
23213 // CHECK22-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
23214 // CHECK22-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
23215 // CHECK22-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
23216 // CHECK22-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !18
23217 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23218 // CHECK22:       omp.body.continue:
23219 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23220 // CHECK22:       omp.inner.for.inc:
23221 // CHECK22-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
23222 // CHECK22-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
23223 // CHECK22-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
23224 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
23225 // CHECK22:       omp.inner.for.end:
23226 // CHECK22-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23227 // CHECK22:       omp.loop.exit:
23228 // CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
23229 // CHECK22-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23230 // CHECK22-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
23231 // CHECK22-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23232 // CHECK22:       .omp.final.then:
23233 // CHECK22-NEXT:    store i32 10, i32* [[I]], align 4
23234 // CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23235 // CHECK22:       .omp.final.done:
23236 // CHECK22-NEXT:    ret void
23237 //
23238 //
23239 // CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
23240 // CHECK22-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR0]] {
23241 // CHECK22-NEXT:  entry:
23242 // CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
23243 // CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
23244 // CHECK22-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
23245 // CHECK22-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
23246 // CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
23247 // CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
23248 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
23249 // CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
23250 // CHECK22-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
23251 // CHECK22-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
23252 // CHECK22-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
23253 // CHECK22-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
23254 // CHECK22-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
23255 // CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
23256 // CHECK22-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
23257 // CHECK22-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
23258 // CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
23259 // CHECK22-NEXT:    ret void
23260 //
23261 //
23262 // CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..2
23263 // CHECK22-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] {
23264 // CHECK22-NEXT:  entry:
23265 // CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
23266 // CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
23267 // CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
23268 // CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
23269 // CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23270 // CHECK22-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23271 // CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23272 // CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23273 // CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23274 // CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23275 // CHECK22-NEXT:    [[I:%.*]] = alloca i32, align 4
23276 // CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23277 // CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23278 // CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
23279 // CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
23280 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
23281 // CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
23282 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23283 // CHECK22-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
23284 // CHECK22-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23285 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23286 // CHECK22-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23287 // CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
23288 // CHECK22-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23289 // CHECK22-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23290 // CHECK22-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
23291 // CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23292 // CHECK22:       cond.true:
23293 // CHECK22-NEXT:    br label [[COND_END:%.*]]
23294 // CHECK22:       cond.false:
23295 // CHECK22-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23296 // CHECK22-NEXT:    br label [[COND_END]]
23297 // CHECK22:       cond.end:
23298 // CHECK22-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
23299 // CHECK22-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23300 // CHECK22-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23301 // CHECK22-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
23302 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23303 // CHECK22:       omp.inner.for.cond:
23304 // CHECK22-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
23305 // CHECK22-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
23306 // CHECK22-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
23307 // CHECK22-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23308 // CHECK22:       omp.inner.for.body:
23309 // CHECK22-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
23310 // CHECK22-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
23311 // CHECK22-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23312 // CHECK22-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !21
23313 // CHECK22-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !21
23314 // CHECK22-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
23315 // CHECK22-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !21
23316 // CHECK22-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !21
23317 // CHECK22-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
23318 // CHECK22-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
23319 // CHECK22-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
23320 // CHECK22-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !21
23321 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23322 // CHECK22:       omp.body.continue:
23323 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23324 // CHECK22:       omp.inner.for.inc:
23325 // CHECK22-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
23326 // CHECK22-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
23327 // CHECK22-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
23328 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
23329 // CHECK22:       omp.inner.for.end:
23330 // CHECK22-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23331 // CHECK22:       omp.loop.exit:
23332 // CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
23333 // CHECK22-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23334 // CHECK22-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
23335 // CHECK22-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23336 // CHECK22:       .omp.final.then:
23337 // CHECK22-NEXT:    store i32 10, i32* [[I]], align 4
23338 // CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23339 // CHECK22:       .omp.final.done:
23340 // CHECK22-NEXT:    ret void
23341 //
23342 //
23343 // CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
23344 // CHECK22-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR0]] {
23345 // CHECK22-NEXT:  entry:
23346 // CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
23347 // CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
23348 // CHECK22-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
23349 // CHECK22-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
23350 // CHECK22-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
23351 // CHECK22-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
23352 // CHECK22-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
23353 // CHECK22-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
23354 // CHECK22-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
23355 // CHECK22-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
23356 // CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
23357 // CHECK22-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
23358 // CHECK22-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
23359 // CHECK22-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
23360 // CHECK22-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
23361 // CHECK22-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
23362 // CHECK22-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
23363 // CHECK22-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
23364 // CHECK22-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
23365 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
23366 // CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
23367 // CHECK22-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
23368 // CHECK22-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
23369 // CHECK22-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
23370 // CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
23371 // CHECK22-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
23372 // CHECK22-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
23373 // CHECK22-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
23374 // CHECK22-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
23375 // CHECK22-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
23376 // CHECK22-NEXT:    store i32 [[TMP8]], i32* [[CONV5]], align 4
23377 // CHECK22-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
23378 // CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
23379 // CHECK22-NEXT:    ret void
23380 //
23381 //
23382 // CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..3
23383 // CHECK22-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]]) #[[ATTR1]] {
23384 // CHECK22-NEXT:  entry:
23385 // CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
23386 // CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
23387 // CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
23388 // CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
23389 // CHECK22-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
23390 // CHECK22-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
23391 // CHECK22-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
23392 // CHECK22-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
23393 // CHECK22-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
23394 // CHECK22-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
23395 // CHECK22-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
23396 // CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23397 // CHECK22-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23398 // CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23399 // CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23400 // CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23401 // CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23402 // CHECK22-NEXT:    [[I:%.*]] = alloca i32, align 4
23403 // CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23404 // CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23405 // CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
23406 // CHECK22-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
23407 // CHECK22-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
23408 // CHECK22-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
23409 // CHECK22-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
23410 // CHECK22-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
23411 // CHECK22-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
23412 // CHECK22-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
23413 // CHECK22-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
23414 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
23415 // CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
23416 // CHECK22-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
23417 // CHECK22-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
23418 // CHECK22-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
23419 // CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
23420 // CHECK22-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
23421 // CHECK22-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
23422 // CHECK22-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
23423 // CHECK22-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 0
23424 // CHECK22-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
23425 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23426 // CHECK22-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
23427 // CHECK22-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23428 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23429 // CHECK22-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23430 // CHECK22-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
23431 // CHECK22-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23432 // CHECK22-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23433 // CHECK22-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
23434 // CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23435 // CHECK22:       cond.true:
23436 // CHECK22-NEXT:    br label [[COND_END:%.*]]
23437 // CHECK22:       cond.false:
23438 // CHECK22-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23439 // CHECK22-NEXT:    br label [[COND_END]]
23440 // CHECK22:       cond.end:
23441 // CHECK22-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
23442 // CHECK22-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23443 // CHECK22-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23444 // CHECK22-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
23445 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23446 // CHECK22:       omp.inner.for.cond:
23447 // CHECK22-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
23448 // CHECK22-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
23449 // CHECK22-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
23450 // CHECK22-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23451 // CHECK22:       omp.inner.for.body:
23452 // CHECK22-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
23453 // CHECK22-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
23454 // CHECK22-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23455 // CHECK22-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24
23456 // CHECK22-NEXT:    [[TMP16:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !24
23457 // CHECK22-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
23458 // CHECK22-NEXT:    store i32 [[ADD6]], i32* [[CONV]], align 4, !llvm.access.group !24
23459 // CHECK22-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
23460 // CHECK22-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !24
23461 // CHECK22-NEXT:    [[CONV7:%.*]] = fpext float [[TMP17]] to double
23462 // CHECK22-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
23463 // CHECK22-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
23464 // CHECK22-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !24
23465 // CHECK22-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
23466 // CHECK22-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
23467 // CHECK22-NEXT:    [[CONV11:%.*]] = fpext float [[TMP18]] to double
23468 // CHECK22-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
23469 // CHECK22-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
23470 // CHECK22-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
23471 // CHECK22-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
23472 // CHECK22-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i64 0, i64 2
23473 // CHECK22-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
23474 // CHECK22-NEXT:    [[ADD16:%.*]] = fadd double [[TMP19]], 1.000000e+00
23475 // CHECK22-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
23476 // CHECK22-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP5]]
23477 // CHECK22-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP20]]
23478 // CHECK22-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i64 3
23479 // CHECK22-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
23480 // CHECK22-NEXT:    [[ADD19:%.*]] = fadd double [[TMP21]], 1.000000e+00
23481 // CHECK22-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
23482 // CHECK22-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
23483 // CHECK22-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !24
23484 // CHECK22-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP22]], 1
23485 // CHECK22-NEXT:    store i64 [[ADD20]], i64* [[X]], align 8, !llvm.access.group !24
23486 // CHECK22-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
23487 // CHECK22-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !24
23488 // CHECK22-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP23]] to i32
23489 // CHECK22-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
23490 // CHECK22-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
23491 // CHECK22-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 8, !llvm.access.group !24
23492 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23493 // CHECK22:       omp.body.continue:
23494 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23495 // CHECK22:       omp.inner.for.inc:
23496 // CHECK22-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
23497 // CHECK22-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP24]], 1
23498 // CHECK22-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
23499 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
23500 // CHECK22:       omp.inner.for.end:
23501 // CHECK22-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23502 // CHECK22:       omp.loop.exit:
23503 // CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
23504 // CHECK22-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23505 // CHECK22-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
23506 // CHECK22-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23507 // CHECK22:       .omp.final.then:
23508 // CHECK22-NEXT:    store i32 10, i32* [[I]], align 4
23509 // CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23510 // CHECK22:       .omp.final.done:
23511 // CHECK22-NEXT:    ret void
23512 //
23513 //
23514 // CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
23515 // CHECK22-SAME: (i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
23516 // CHECK22-NEXT:  entry:
23517 // CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
23518 // CHECK22-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
23519 // CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
23520 // CHECK22-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
23521 // CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
23522 // CHECK22-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
23523 // CHECK22-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
23524 // CHECK22-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
23525 // CHECK22-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
23526 // CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
23527 // CHECK22-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
23528 // CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
23529 // CHECK22-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
23530 // CHECK22-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
23531 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
23532 // CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
23533 // CHECK22-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
23534 // CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
23535 // CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
23536 // CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
23537 // CHECK22-NEXT:    [[CONV4:%.*]] = bitcast i64* [[A_CASTED]] to i32*
23538 // CHECK22-NEXT:    store i32 [[TMP1]], i32* [[CONV4]], align 4
23539 // CHECK22-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
23540 // CHECK22-NEXT:    [[TMP3:%.*]] = load i32, i32* [[CONV1]], align 4
23541 // CHECK22-NEXT:    [[CONV5:%.*]] = bitcast i64* [[N_CASTED]] to i32*
23542 // CHECK22-NEXT:    store i32 [[TMP3]], i32* [[CONV5]], align 4
23543 // CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
23544 // CHECK22-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV2]], align 2
23545 // CHECK22-NEXT:    [[CONV6:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
23546 // CHECK22-NEXT:    store i16 [[TMP5]], i16* [[CONV6]], align 2
23547 // CHECK22-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AA_CASTED]], align 8
23548 // CHECK22-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 1
23549 // CHECK22-NEXT:    [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
23550 // CHECK22-NEXT:    store i8 [[TMP7]], i8* [[CONV7]], align 1
23551 // CHECK22-NEXT:    [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
23552 // CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]])
23553 // CHECK22-NEXT:    ret void
23554 //
23555 //
23556 // CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..4
23557 // CHECK22-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
23558 // CHECK22-NEXT:  entry:
23559 // CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
23560 // CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
23561 // CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
23562 // CHECK22-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
23563 // CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
23564 // CHECK22-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
23565 // CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
23566 // CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23567 // CHECK22-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23568 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23569 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
23570 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4
23571 // CHECK22-NEXT:    [[I:%.*]] = alloca i32, align 4
23572 // CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23573 // CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23574 // CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23575 // CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23576 // CHECK22-NEXT:    [[I8:%.*]] = alloca i32, align 4
23577 // CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23578 // CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23579 // CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
23580 // CHECK22-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
23581 // CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
23582 // CHECK22-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
23583 // CHECK22-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
23584 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
23585 // CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
23586 // CHECK22-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
23587 // CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
23588 // CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
23589 // CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
23590 // CHECK22-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
23591 // CHECK22-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
23592 // CHECK22-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_4]], align 4
23593 // CHECK22-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
23594 // CHECK22-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23595 // CHECK22-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
23596 // CHECK22-NEXT:    [[SUB6:%.*]] = sub i32 [[SUB]], 1
23597 // CHECK22-NEXT:    [[ADD:%.*]] = add i32 [[SUB6]], 1
23598 // CHECK22-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
23599 // CHECK22-NEXT:    [[SUB7:%.*]] = sub i32 [[DIV]], 1
23600 // CHECK22-NEXT:    store i32 [[SUB7]], i32* [[DOTCAPTURE_EXPR_5]], align 4
23601 // CHECK22-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23602 // CHECK22-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
23603 // CHECK22-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23604 // CHECK22-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
23605 // CHECK22-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
23606 // CHECK22-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
23607 // CHECK22:       omp.precond.then:
23608 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23609 // CHECK22-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
23610 // CHECK22-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
23611 // CHECK22-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23612 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23613 // CHECK22-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23614 // CHECK22-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
23615 // CHECK22-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23616 // CHECK22-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23617 // CHECK22-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
23618 // CHECK22-NEXT:    [[CMP9:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
23619 // CHECK22-NEXT:    br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23620 // CHECK22:       cond.true:
23621 // CHECK22-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5]], align 4
23622 // CHECK22-NEXT:    br label [[COND_END:%.*]]
23623 // CHECK22:       cond.false:
23624 // CHECK22-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23625 // CHECK22-NEXT:    br label [[COND_END]]
23626 // CHECK22:       cond.end:
23627 // CHECK22-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
23628 // CHECK22-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23629 // CHECK22-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23630 // CHECK22-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
23631 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23632 // CHECK22:       omp.inner.for.cond:
23633 // CHECK22-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
23634 // CHECK22-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
23635 // CHECK22-NEXT:    [[ADD10:%.*]] = add i32 [[TMP17]], 1
23636 // CHECK22-NEXT:    [[CMP11:%.*]] = icmp ult i32 [[TMP16]], [[ADD10]]
23637 // CHECK22-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23638 // CHECK22:       omp.inner.for.body:
23639 // CHECK22-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !27
23640 // CHECK22-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
23641 // CHECK22-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
23642 // CHECK22-NEXT:    [[ADD12:%.*]] = add i32 [[TMP18]], [[MUL]]
23643 // CHECK22-NEXT:    store i32 [[ADD12]], i32* [[I8]], align 4, !llvm.access.group !27
23644 // CHECK22-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !27
23645 // CHECK22-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP20]], 1
23646 // CHECK22-NEXT:    store i32 [[ADD13]], i32* [[CONV]], align 4, !llvm.access.group !27
23647 // CHECK22-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV2]], align 2, !llvm.access.group !27
23648 // CHECK22-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP21]] to i32
23649 // CHECK22-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
23650 // CHECK22-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
23651 // CHECK22-NEXT:    store i16 [[CONV16]], i16* [[CONV2]], align 2, !llvm.access.group !27
23652 // CHECK22-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV3]], align 1, !llvm.access.group !27
23653 // CHECK22-NEXT:    [[CONV17:%.*]] = sext i8 [[TMP22]] to i32
23654 // CHECK22-NEXT:    [[ADD18:%.*]] = add nsw i32 [[CONV17]], 1
23655 // CHECK22-NEXT:    [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8
23656 // CHECK22-NEXT:    store i8 [[CONV19]], i8* [[CONV3]], align 1, !llvm.access.group !27
23657 // CHECK22-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
23658 // CHECK22-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
23659 // CHECK22-NEXT:    [[ADD20:%.*]] = add nsw i32 [[TMP23]], 1
23660 // CHECK22-NEXT:    store i32 [[ADD20]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
23661 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23662 // CHECK22:       omp.body.continue:
23663 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23664 // CHECK22:       omp.inner.for.inc:
23665 // CHECK22-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
23666 // CHECK22-NEXT:    [[ADD21:%.*]] = add i32 [[TMP24]], 1
23667 // CHECK22-NEXT:    store i32 [[ADD21]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
23668 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
23669 // CHECK22:       omp.inner.for.end:
23670 // CHECK22-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23671 // CHECK22:       omp.loop.exit:
23672 // CHECK22-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23673 // CHECK22-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
23674 // CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
23675 // CHECK22-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23676 // CHECK22-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
23677 // CHECK22-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23678 // CHECK22:       .omp.final.then:
23679 // CHECK22-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23680 // CHECK22-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
23681 // CHECK22-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
23682 // CHECK22-NEXT:    [[SUB22:%.*]] = sub i32 [[TMP30]], [[TMP31]]
23683 // CHECK22-NEXT:    [[SUB23:%.*]] = sub i32 [[SUB22]], 1
23684 // CHECK22-NEXT:    [[ADD24:%.*]] = add i32 [[SUB23]], 1
23685 // CHECK22-NEXT:    [[DIV25:%.*]] = udiv i32 [[ADD24]], 1
23686 // CHECK22-NEXT:    [[MUL26:%.*]] = mul i32 [[DIV25]], 1
23687 // CHECK22-NEXT:    [[ADD27:%.*]] = add i32 [[TMP29]], [[MUL26]]
23688 // CHECK22-NEXT:    store i32 [[ADD27]], i32* [[I8]], align 4
23689 // CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23690 // CHECK22:       .omp.final.done:
23691 // CHECK22-NEXT:    br label [[OMP_PRECOND_END]]
23692 // CHECK22:       omp.precond.end:
23693 // CHECK22-NEXT:    ret void
23694 //
23695 //
23696 // CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
23697 // CHECK22-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
23698 // CHECK22-NEXT:  entry:
23699 // CHECK22-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
23700 // CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
23701 // CHECK22-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
23702 // CHECK22-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
23703 // CHECK22-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
23704 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
23705 // CHECK22-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
23706 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
23707 // CHECK22-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
23708 // CHECK22-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
23709 // CHECK22-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
23710 // CHECK22-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
23711 // CHECK22-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
23712 // CHECK22-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
23713 // CHECK22-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
23714 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
23715 // CHECK22-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
23716 // CHECK22-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
23717 // CHECK22-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
23718 // CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
23719 // CHECK22-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
23720 // CHECK22-NEXT:    [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i32*
23721 // CHECK22-NEXT:    store i32 [[TMP4]], i32* [[CONV4]], align 4
23722 // CHECK22-NEXT:    [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
23723 // CHECK22-NEXT:    [[TMP6:%.*]] = load i8, i8* [[CONV3]], align 1
23724 // CHECK22-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
23725 // CHECK22-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
23726 // CHECK22-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
23727 // CHECK22-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV5]], align 1
23728 // CHECK22-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
23729 // CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]], i64 [[TMP7]])
23730 // CHECK22-NEXT:    ret void
23731 //
23732 //
23733 // CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..5
23734 // CHECK22-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
23735 // CHECK22-NEXT:  entry:
23736 // CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
23737 // CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
23738 // CHECK22-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
23739 // CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
23740 // CHECK22-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
23741 // CHECK22-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
23742 // CHECK22-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
23743 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
23744 // CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23745 // CHECK22-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23746 // CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23747 // CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23748 // CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23749 // CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23750 // CHECK22-NEXT:    [[I:%.*]] = alloca i32, align 4
23751 // CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23752 // CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23753 // CHECK22-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
23754 // CHECK22-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
23755 // CHECK22-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
23756 // CHECK22-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
23757 // CHECK22-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
23758 // CHECK22-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
23759 // CHECK22-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
23760 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
23761 // CHECK22-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
23762 // CHECK22-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
23763 // CHECK22-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
23764 // CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
23765 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23766 // CHECK22-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
23767 // CHECK22-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23768 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23769 // CHECK22-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23770 // CHECK22-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
23771 // CHECK22-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23772 // CHECK22-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23773 // CHECK22-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
23774 // CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23775 // CHECK22:       cond.true:
23776 // CHECK22-NEXT:    br label [[COND_END:%.*]]
23777 // CHECK22:       cond.false:
23778 // CHECK22-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23779 // CHECK22-NEXT:    br label [[COND_END]]
23780 // CHECK22:       cond.end:
23781 // CHECK22-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
23782 // CHECK22-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23783 // CHECK22-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23784 // CHECK22-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
23785 // CHECK22-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV3]], align 1
23786 // CHECK22-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1
23787 // CHECK22-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
23788 // CHECK22:       omp_if.then:
23789 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23790 // CHECK22:       omp.inner.for.cond:
23791 // CHECK22-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
23792 // CHECK22-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !30
23793 // CHECK22-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
23794 // CHECK22-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23795 // CHECK22:       omp.inner.for.body:
23796 // CHECK22-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
23797 // CHECK22-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
23798 // CHECK22-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23799 // CHECK22-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !30
23800 // CHECK22-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !30
23801 // CHECK22-NEXT:    [[CONV5:%.*]] = sitofp i32 [[TMP13]] to double
23802 // CHECK22-NEXT:    [[ADD6:%.*]] = fadd double [[CONV5]], 1.500000e+00
23803 // CHECK22-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
23804 // CHECK22-NEXT:    store double [[ADD6]], double* [[A]], align 8, !llvm.access.group !30
23805 // CHECK22-NEXT:    [[A7:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
23806 // CHECK22-NEXT:    [[TMP14:%.*]] = load double, double* [[A7]], align 8, !llvm.access.group !30
23807 // CHECK22-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
23808 // CHECK22-NEXT:    store double [[INC]], double* [[A7]], align 8, !llvm.access.group !30
23809 // CHECK22-NEXT:    [[CONV8:%.*]] = fptosi double [[INC]] to i16
23810 // CHECK22-NEXT:    [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]]
23811 // CHECK22-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP15]]
23812 // CHECK22-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
23813 // CHECK22-NEXT:    store i16 [[CONV8]], i16* [[ARRAYIDX9]], align 2, !llvm.access.group !30
23814 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23815 // CHECK22:       omp.body.continue:
23816 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23817 // CHECK22:       omp.inner.for.inc:
23818 // CHECK22-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
23819 // CHECK22-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
23820 // CHECK22-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
23821 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
23822 // CHECK22:       omp.inner.for.end:
23823 // CHECK22-NEXT:    br label [[OMP_IF_END:%.*]]
23824 // CHECK22:       omp_if.else:
23825 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND11:%.*]]
23826 // CHECK22:       omp.inner.for.cond11:
23827 // CHECK22-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23828 // CHECK22-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23829 // CHECK22-NEXT:    [[CMP12:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
23830 // CHECK22-NEXT:    br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY13:%.*]], label [[OMP_INNER_FOR_END27:%.*]]
23831 // CHECK22:       omp.inner.for.body13:
23832 // CHECK22-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23833 // CHECK22-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[TMP19]], 1
23834 // CHECK22-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
23835 // CHECK22-NEXT:    store i32 [[ADD15]], i32* [[I]], align 4
23836 // CHECK22-NEXT:    [[TMP20:%.*]] = load i32, i32* [[CONV]], align 4
23837 // CHECK22-NEXT:    [[CONV16:%.*]] = sitofp i32 [[TMP20]] to double
23838 // CHECK22-NEXT:    [[ADD17:%.*]] = fadd double [[CONV16]], 1.500000e+00
23839 // CHECK22-NEXT:    [[A18:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
23840 // CHECK22-NEXT:    store double [[ADD17]], double* [[A18]], align 8
23841 // CHECK22-NEXT:    [[A19:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
23842 // CHECK22-NEXT:    [[TMP21:%.*]] = load double, double* [[A19]], align 8
23843 // CHECK22-NEXT:    [[INC20:%.*]] = fadd double [[TMP21]], 1.000000e+00
23844 // CHECK22-NEXT:    store double [[INC20]], double* [[A19]], align 8
23845 // CHECK22-NEXT:    [[CONV21:%.*]] = fptosi double [[INC20]] to i16
23846 // CHECK22-NEXT:    [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]]
23847 // CHECK22-NEXT:    [[ARRAYIDX22:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP22]]
23848 // CHECK22-NEXT:    [[ARRAYIDX23:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX22]], i64 1
23849 // CHECK22-NEXT:    store i16 [[CONV21]], i16* [[ARRAYIDX23]], align 2
23850 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE24:%.*]]
23851 // CHECK22:       omp.body.continue24:
23852 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC25:%.*]]
23853 // CHECK22:       omp.inner.for.inc25:
23854 // CHECK22-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
23855 // CHECK22-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP23]], 1
23856 // CHECK22-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4
23857 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND11]], !llvm.loop [[LOOP33:![0-9]+]]
23858 // CHECK22:       omp.inner.for.end27:
23859 // CHECK22-NEXT:    br label [[OMP_IF_END]]
23860 // CHECK22:       omp_if.end:
23861 // CHECK22-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23862 // CHECK22:       omp.loop.exit:
23863 // CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
23864 // CHECK22-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23865 // CHECK22-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
23866 // CHECK22-NEXT:    br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23867 // CHECK22:       .omp.final.then:
23868 // CHECK22-NEXT:    store i32 10, i32* [[I]], align 4
23869 // CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23870 // CHECK22:       .omp.final.done:
23871 // CHECK22-NEXT:    ret void
23872 //
23873 //
23874 // CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
23875 // CHECK22-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
23876 // CHECK22-NEXT:  entry:
23877 // CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
23878 // CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
23879 // CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
23880 // CHECK22-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
23881 // CHECK22-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
23882 // CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
23883 // CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
23884 // CHECK22-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
23885 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
23886 // CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
23887 // CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
23888 // CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
23889 // CHECK22-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
23890 // CHECK22-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
23891 // CHECK22-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
23892 // CHECK22-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
23893 // CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
23894 // CHECK22-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
23895 // CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
23896 // CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
23897 // CHECK22-NEXT:    ret void
23898 //
23899 //
23900 // CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..6
23901 // CHECK22-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
23902 // CHECK22-NEXT:  entry:
23903 // CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
23904 // CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
23905 // CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
23906 // CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
23907 // CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
23908 // CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23909 // CHECK22-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23910 // CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23911 // CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23912 // CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
23913 // CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
23914 // CHECK22-NEXT:    [[I:%.*]] = alloca i32, align 4
23915 // CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
23916 // CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
23917 // CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
23918 // CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
23919 // CHECK22-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
23920 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
23921 // CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
23922 // CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
23923 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
23924 // CHECK22-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
23925 // CHECK22-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
23926 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
23927 // CHECK22-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
23928 // CHECK22-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
23929 // CHECK22-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
23930 // CHECK22-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23931 // CHECK22-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
23932 // CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
23933 // CHECK22:       cond.true:
23934 // CHECK22-NEXT:    br label [[COND_END:%.*]]
23935 // CHECK22:       cond.false:
23936 // CHECK22-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
23937 // CHECK22-NEXT:    br label [[COND_END]]
23938 // CHECK22:       cond.end:
23939 // CHECK22-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
23940 // CHECK22-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
23941 // CHECK22-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
23942 // CHECK22-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
23943 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23944 // CHECK22:       omp.inner.for.cond:
23945 // CHECK22-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
23946 // CHECK22-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
23947 // CHECK22-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
23948 // CHECK22-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23949 // CHECK22:       omp.inner.for.body:
23950 // CHECK22-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
23951 // CHECK22-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
23952 // CHECK22-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
23953 // CHECK22-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !35
23954 // CHECK22-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !35
23955 // CHECK22-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
23956 // CHECK22-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !35
23957 // CHECK22-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !35
23958 // CHECK22-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
23959 // CHECK22-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
23960 // CHECK22-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
23961 // CHECK22-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !35
23962 // CHECK22-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
23963 // CHECK22-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !35
23964 // CHECK22-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
23965 // CHECK22-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !35
23966 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23967 // CHECK22:       omp.body.continue:
23968 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23969 // CHECK22:       omp.inner.for.inc:
23970 // CHECK22-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
23971 // CHECK22-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP12]], 1
23972 // CHECK22-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
23973 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
23974 // CHECK22:       omp.inner.for.end:
23975 // CHECK22-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23976 // CHECK22:       omp.loop.exit:
23977 // CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
23978 // CHECK22-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23979 // CHECK22-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
23980 // CHECK22-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23981 // CHECK22:       .omp.final.then:
23982 // CHECK22-NEXT:    store i32 10, i32* [[I]], align 4
23983 // CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23984 // CHECK22:       .omp.final.done:
23985 // CHECK22-NEXT:    ret void
23986 //
23987 //
23988 // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
23989 // CHECK23-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], i32 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] {
23990 // CHECK23-NEXT:  entry:
23991 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
23992 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
23993 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4
23994 // CHECK23-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
23995 // CHECK23-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
23996 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
23997 // CHECK23-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
23998 // CHECK23-NEXT:    store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
23999 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
24000 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
24001 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
24002 // CHECK23-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
24003 // CHECK23-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
24004 // CHECK23-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
24005 // CHECK23-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
24006 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
24007 // CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP4]])
24008 // CHECK23-NEXT:    ret void
24009 //
24010 //
24011 // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined.
24012 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
24013 // CHECK23-NEXT:  entry:
24014 // CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24015 // CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24016 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
24017 // CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24018 // CHECK23-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24019 // CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24020 // CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24021 // CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24022 // CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24023 // CHECK23-NEXT:    [[I:%.*]] = alloca i32, align 4
24024 // CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24025 // CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24026 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
24027 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
24028 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24029 // CHECK23-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
24030 // CHECK23-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24031 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24032 // CHECK23-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24033 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
24034 // CHECK23-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24035 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24036 // CHECK23-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
24037 // CHECK23-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24038 // CHECK23:       cond.true:
24039 // CHECK23-NEXT:    br label [[COND_END:%.*]]
24040 // CHECK23:       cond.false:
24041 // CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24042 // CHECK23-NEXT:    br label [[COND_END]]
24043 // CHECK23:       cond.end:
24044 // CHECK23-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
24045 // CHECK23-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24046 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24047 // CHECK23-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
24048 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24049 // CHECK23:       omp.inner.for.cond:
24050 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
24051 // CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
24052 // CHECK23-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
24053 // CHECK23-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24054 // CHECK23:       omp.inner.for.body:
24055 // CHECK23-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
24056 // CHECK23-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
24057 // CHECK23-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24058 // CHECK23-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
24059 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24060 // CHECK23:       omp.body.continue:
24061 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24062 // CHECK23:       omp.inner.for.inc:
24063 // CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
24064 // CHECK23-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
24065 // CHECK23-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
24066 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
24067 // CHECK23:       omp.inner.for.end:
24068 // CHECK23-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24069 // CHECK23:       omp.loop.exit:
24070 // CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
24071 // CHECK23-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24072 // CHECK23-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
24073 // CHECK23-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24074 // CHECK23:       .omp.final.then:
24075 // CHECK23-NEXT:    store i32 10, i32* [[I]], align 4
24076 // CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24077 // CHECK23:       .omp.final.done:
24078 // CHECK23-NEXT:    ret void
24079 //
24080 //
24081 // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
24082 // CHECK23-SAME: (i32 noundef [[AA:%.*]]) #[[ATTR0]] {
24083 // CHECK23-NEXT:  entry:
24084 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
24085 // CHECK23-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
24086 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
24087 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
24088 // CHECK23-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
24089 // CHECK23-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
24090 // CHECK23-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
24091 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
24092 // CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]])
24093 // CHECK23-NEXT:    ret void
24094 //
24095 //
24096 // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..1
24097 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] {
24098 // CHECK23-NEXT:  entry:
24099 // CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24100 // CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24101 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
24102 // CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24103 // CHECK23-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24104 // CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24105 // CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24106 // CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24107 // CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24108 // CHECK23-NEXT:    [[I:%.*]] = alloca i32, align 4
24109 // CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24110 // CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24111 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
24112 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
24113 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24114 // CHECK23-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
24115 // CHECK23-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24116 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24117 // CHECK23-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24118 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
24119 // CHECK23-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24120 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24121 // CHECK23-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
24122 // CHECK23-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24123 // CHECK23:       cond.true:
24124 // CHECK23-NEXT:    br label [[COND_END:%.*]]
24125 // CHECK23:       cond.false:
24126 // CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24127 // CHECK23-NEXT:    br label [[COND_END]]
24128 // CHECK23:       cond.end:
24129 // CHECK23-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
24130 // CHECK23-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24131 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24132 // CHECK23-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
24133 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24134 // CHECK23:       omp.inner.for.cond:
24135 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
24136 // CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
24137 // CHECK23-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
24138 // CHECK23-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24139 // CHECK23:       omp.inner.for.body:
24140 // CHECK23-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
24141 // CHECK23-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
24142 // CHECK23-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24143 // CHECK23-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !19
24144 // CHECK23-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !19
24145 // CHECK23-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
24146 // CHECK23-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
24147 // CHECK23-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
24148 // CHECK23-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !19
24149 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24150 // CHECK23:       omp.body.continue:
24151 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24152 // CHECK23:       omp.inner.for.inc:
24153 // CHECK23-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
24154 // CHECK23-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
24155 // CHECK23-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
24156 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
24157 // CHECK23:       omp.inner.for.end:
24158 // CHECK23-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24159 // CHECK23:       omp.loop.exit:
24160 // CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
24161 // CHECK23-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24162 // CHECK23-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
24163 // CHECK23-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24164 // CHECK23:       .omp.final.then:
24165 // CHECK23-NEXT:    store i32 10, i32* [[I]], align 4
24166 // CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24167 // CHECK23:       .omp.final.done:
24168 // CHECK23-NEXT:    ret void
24169 //
24170 //
24171 // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
24172 // CHECK23-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR0]] {
24173 // CHECK23-NEXT:  entry:
24174 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
24175 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
24176 // CHECK23-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
24177 // CHECK23-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
24178 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
24179 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
24180 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
24181 // CHECK23-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
24182 // CHECK23-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
24183 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
24184 // CHECK23-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
24185 // CHECK23-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
24186 // CHECK23-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
24187 // CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
24188 // CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
24189 // CHECK23-NEXT:    ret void
24190 //
24191 //
24192 // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..2
24193 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] {
24194 // CHECK23-NEXT:  entry:
24195 // CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24196 // CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24197 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
24198 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
24199 // CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24200 // CHECK23-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24201 // CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24202 // CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24203 // CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24204 // CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24205 // CHECK23-NEXT:    [[I:%.*]] = alloca i32, align 4
24206 // CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24207 // CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24208 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
24209 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
24210 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
24211 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24212 // CHECK23-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
24213 // CHECK23-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24214 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24215 // CHECK23-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24216 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
24217 // CHECK23-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24218 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24219 // CHECK23-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
24220 // CHECK23-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24221 // CHECK23:       cond.true:
24222 // CHECK23-NEXT:    br label [[COND_END:%.*]]
24223 // CHECK23:       cond.false:
24224 // CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24225 // CHECK23-NEXT:    br label [[COND_END]]
24226 // CHECK23:       cond.end:
24227 // CHECK23-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
24228 // CHECK23-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24229 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24230 // CHECK23-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
24231 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24232 // CHECK23:       omp.inner.for.cond:
24233 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
24234 // CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
24235 // CHECK23-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
24236 // CHECK23-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24237 // CHECK23:       omp.inner.for.body:
24238 // CHECK23-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
24239 // CHECK23-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
24240 // CHECK23-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24241 // CHECK23-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !22
24242 // CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !22
24243 // CHECK23-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
24244 // CHECK23-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !22
24245 // CHECK23-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !22
24246 // CHECK23-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP9]] to i32
24247 // CHECK23-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
24248 // CHECK23-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
24249 // CHECK23-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !22
24250 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24251 // CHECK23:       omp.body.continue:
24252 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24253 // CHECK23:       omp.inner.for.inc:
24254 // CHECK23-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
24255 // CHECK23-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1
24256 // CHECK23-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
24257 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
24258 // CHECK23:       omp.inner.for.end:
24259 // CHECK23-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24260 // CHECK23:       omp.loop.exit:
24261 // CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
24262 // CHECK23-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24263 // CHECK23-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
24264 // CHECK23-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24265 // CHECK23:       .omp.final.then:
24266 // CHECK23-NEXT:    store i32 10, i32* [[I]], align 4
24267 // CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24268 // CHECK23:       .omp.final.done:
24269 // CHECK23-NEXT:    ret void
24270 //
24271 //
24272 // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
24273 // CHECK23-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR0]] {
24274 // CHECK23-NEXT:  entry:
24275 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
24276 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
24277 // CHECK23-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
24278 // CHECK23-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
24279 // CHECK23-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
24280 // CHECK23-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
24281 // CHECK23-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
24282 // CHECK23-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
24283 // CHECK23-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
24284 // CHECK23-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
24285 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
24286 // CHECK23-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
24287 // CHECK23-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
24288 // CHECK23-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
24289 // CHECK23-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
24290 // CHECK23-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
24291 // CHECK23-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
24292 // CHECK23-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
24293 // CHECK23-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
24294 // CHECK23-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
24295 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
24296 // CHECK23-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
24297 // CHECK23-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
24298 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
24299 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
24300 // CHECK23-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
24301 // CHECK23-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
24302 // CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
24303 // CHECK23-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
24304 // CHECK23-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
24305 // CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
24306 // CHECK23-NEXT:    ret void
24307 //
24308 //
24309 // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..3
24310 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR1]] {
24311 // CHECK23-NEXT:  entry:
24312 // CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24313 // CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24314 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
24315 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
24316 // CHECK23-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
24317 // CHECK23-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
24318 // CHECK23-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
24319 // CHECK23-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
24320 // CHECK23-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
24321 // CHECK23-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
24322 // CHECK23-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
24323 // CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24324 // CHECK23-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24325 // CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24326 // CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24327 // CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24328 // CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24329 // CHECK23-NEXT:    [[I:%.*]] = alloca i32, align 4
24330 // CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24331 // CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24332 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
24333 // CHECK23-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
24334 // CHECK23-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
24335 // CHECK23-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
24336 // CHECK23-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
24337 // CHECK23-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
24338 // CHECK23-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
24339 // CHECK23-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
24340 // CHECK23-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
24341 // CHECK23-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
24342 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
24343 // CHECK23-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
24344 // CHECK23-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
24345 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
24346 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
24347 // CHECK23-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
24348 // CHECK23-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
24349 // CHECK23-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 0
24350 // CHECK23-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
24351 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24352 // CHECK23-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
24353 // CHECK23-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24354 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24355 // CHECK23-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24356 // CHECK23-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
24357 // CHECK23-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24358 // CHECK23-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24359 // CHECK23-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
24360 // CHECK23-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24361 // CHECK23:       cond.true:
24362 // CHECK23-NEXT:    br label [[COND_END:%.*]]
24363 // CHECK23:       cond.false:
24364 // CHECK23-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24365 // CHECK23-NEXT:    br label [[COND_END]]
24366 // CHECK23:       cond.end:
24367 // CHECK23-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
24368 // CHECK23-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24369 // CHECK23-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24370 // CHECK23-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
24371 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24372 // CHECK23:       omp.inner.for.cond:
24373 // CHECK23-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
24374 // CHECK23-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
24375 // CHECK23-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
24376 // CHECK23-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24377 // CHECK23:       omp.inner.for.body:
24378 // CHECK23-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
24379 // CHECK23-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
24380 // CHECK23-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24381 // CHECK23-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !25
24382 // CHECK23-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !25
24383 // CHECK23-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
24384 // CHECK23-NEXT:    store i32 [[ADD6]], i32* [[A_ADDR]], align 4, !llvm.access.group !25
24385 // CHECK23-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
24386 // CHECK23-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !25
24387 // CHECK23-NEXT:    [[CONV:%.*]] = fpext float [[TMP17]] to double
24388 // CHECK23-NEXT:    [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00
24389 // CHECK23-NEXT:    [[CONV8:%.*]] = fptrunc double [[ADD7]] to float
24390 // CHECK23-NEXT:    store float [[CONV8]], float* [[ARRAYIDX]], align 4, !llvm.access.group !25
24391 // CHECK23-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
24392 // CHECK23-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX9]], align 4, !llvm.access.group !25
24393 // CHECK23-NEXT:    [[CONV10:%.*]] = fpext float [[TMP18]] to double
24394 // CHECK23-NEXT:    [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00
24395 // CHECK23-NEXT:    [[CONV12:%.*]] = fptrunc double [[ADD11]] to float
24396 // CHECK23-NEXT:    store float [[CONV12]], float* [[ARRAYIDX9]], align 4, !llvm.access.group !25
24397 // CHECK23-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
24398 // CHECK23-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX13]], i32 0, i32 2
24399 // CHECK23-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX14]], align 8, !llvm.access.group !25
24400 // CHECK23-NEXT:    [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00
24401 // CHECK23-NEXT:    store double [[ADD15]], double* [[ARRAYIDX14]], align 8, !llvm.access.group !25
24402 // CHECK23-NEXT:    [[TMP20:%.*]] = mul nsw i32 1, [[TMP5]]
24403 // CHECK23-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP20]]
24404 // CHECK23-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX16]], i32 3
24405 // CHECK23-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !25
24406 // CHECK23-NEXT:    [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00
24407 // CHECK23-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !25
24408 // CHECK23-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
24409 // CHECK23-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !25
24410 // CHECK23-NEXT:    [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1
24411 // CHECK23-NEXT:    store i64 [[ADD19]], i64* [[X]], align 4, !llvm.access.group !25
24412 // CHECK23-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
24413 // CHECK23-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !25
24414 // CHECK23-NEXT:    [[CONV20:%.*]] = sext i8 [[TMP23]] to i32
24415 // CHECK23-NEXT:    [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1
24416 // CHECK23-NEXT:    [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8
24417 // CHECK23-NEXT:    store i8 [[CONV22]], i8* [[Y]], align 4, !llvm.access.group !25
24418 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24419 // CHECK23:       omp.body.continue:
24420 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24421 // CHECK23:       omp.inner.for.inc:
24422 // CHECK23-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
24423 // CHECK23-NEXT:    [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1
24424 // CHECK23-NEXT:    store i32 [[ADD23]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
24425 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
24426 // CHECK23:       omp.inner.for.end:
24427 // CHECK23-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24428 // CHECK23:       omp.loop.exit:
24429 // CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
24430 // CHECK23-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24431 // CHECK23-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
24432 // CHECK23-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24433 // CHECK23:       .omp.final.then:
24434 // CHECK23-NEXT:    store i32 10, i32* [[I]], align 4
24435 // CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24436 // CHECK23:       .omp.final.done:
24437 // CHECK23-NEXT:    ret void
24438 //
24439 //
24440 // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
24441 // CHECK23-SAME: (i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
24442 // CHECK23-NEXT:  entry:
24443 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
24444 // CHECK23-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24445 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
24446 // CHECK23-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
24447 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
24448 // CHECK23-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
24449 // CHECK23-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
24450 // CHECK23-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
24451 // CHECK23-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
24452 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
24453 // CHECK23-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24454 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
24455 // CHECK23-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
24456 // CHECK23-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
24457 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
24458 // CHECK23-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
24459 // CHECK23-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
24460 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
24461 // CHECK23-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
24462 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
24463 // CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
24464 // CHECK23-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
24465 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
24466 // CHECK23-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV]], align 2
24467 // CHECK23-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
24468 // CHECK23-NEXT:    store i16 [[TMP5]], i16* [[CONV2]], align 2
24469 // CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AA_CASTED]], align 4
24470 // CHECK23-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV1]], align 1
24471 // CHECK23-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
24472 // CHECK23-NEXT:    store i8 [[TMP7]], i8* [[CONV3]], align 1
24473 // CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
24474 // CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]])
24475 // CHECK23-NEXT:    ret void
24476 //
24477 //
24478 // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..4
24479 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
24480 // CHECK23-NEXT:  entry:
24481 // CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24482 // CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24483 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
24484 // CHECK23-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24485 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
24486 // CHECK23-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
24487 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
24488 // CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24489 // CHECK23-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24490 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24491 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
24492 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
24493 // CHECK23-NEXT:    [[I:%.*]] = alloca i32, align 4
24494 // CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24495 // CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24496 // CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24497 // CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24498 // CHECK23-NEXT:    [[I6:%.*]] = alloca i32, align 4
24499 // CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24500 // CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24501 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
24502 // CHECK23-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24503 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
24504 // CHECK23-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
24505 // CHECK23-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
24506 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
24507 // CHECK23-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
24508 // CHECK23-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
24509 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
24510 // CHECK23-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
24511 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
24512 // CHECK23-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
24513 // CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24514 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24515 // CHECK23-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
24516 // CHECK23-NEXT:    [[SUB4:%.*]] = sub i32 [[SUB]], 1
24517 // CHECK23-NEXT:    [[ADD:%.*]] = add i32 [[SUB4]], 1
24518 // CHECK23-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
24519 // CHECK23-NEXT:    [[SUB5:%.*]] = sub i32 [[DIV]], 1
24520 // CHECK23-NEXT:    store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_3]], align 4
24521 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24522 // CHECK23-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
24523 // CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24524 // CHECK23-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24525 // CHECK23-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
24526 // CHECK23-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
24527 // CHECK23:       omp.precond.then:
24528 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24529 // CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
24530 // CHECK23-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
24531 // CHECK23-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24532 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24533 // CHECK23-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24534 // CHECK23-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
24535 // CHECK23-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24536 // CHECK23-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24537 // CHECK23-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
24538 // CHECK23-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
24539 // CHECK23-NEXT:    br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24540 // CHECK23:       cond.true:
24541 // CHECK23-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
24542 // CHECK23-NEXT:    br label [[COND_END:%.*]]
24543 // CHECK23:       cond.false:
24544 // CHECK23-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24545 // CHECK23-NEXT:    br label [[COND_END]]
24546 // CHECK23:       cond.end:
24547 // CHECK23-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
24548 // CHECK23-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24549 // CHECK23-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24550 // CHECK23-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
24551 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24552 // CHECK23:       omp.inner.for.cond:
24553 // CHECK23-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
24554 // CHECK23-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28
24555 // CHECK23-NEXT:    [[ADD8:%.*]] = add i32 [[TMP17]], 1
24556 // CHECK23-NEXT:    [[CMP9:%.*]] = icmp ult i32 [[TMP16]], [[ADD8]]
24557 // CHECK23-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24558 // CHECK23:       omp.inner.for.body:
24559 // CHECK23-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !28
24560 // CHECK23-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
24561 // CHECK23-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
24562 // CHECK23-NEXT:    [[ADD10:%.*]] = add i32 [[TMP18]], [[MUL]]
24563 // CHECK23-NEXT:    store i32 [[ADD10]], i32* [[I6]], align 4, !llvm.access.group !28
24564 // CHECK23-NEXT:    [[TMP20:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !28
24565 // CHECK23-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP20]], 1
24566 // CHECK23-NEXT:    store i32 [[ADD11]], i32* [[A_ADDR]], align 4, !llvm.access.group !28
24567 // CHECK23-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !28
24568 // CHECK23-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP21]] to i32
24569 // CHECK23-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
24570 // CHECK23-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
24571 // CHECK23-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !28
24572 // CHECK23-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV1]], align 1, !llvm.access.group !28
24573 // CHECK23-NEXT:    [[CONV15:%.*]] = sext i8 [[TMP22]] to i32
24574 // CHECK23-NEXT:    [[ADD16:%.*]] = add nsw i32 [[CONV15]], 1
24575 // CHECK23-NEXT:    [[CONV17:%.*]] = trunc i32 [[ADD16]] to i8
24576 // CHECK23-NEXT:    store i8 [[CONV17]], i8* [[CONV1]], align 1, !llvm.access.group !28
24577 // CHECK23-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
24578 // CHECK23-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
24579 // CHECK23-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP23]], 1
24580 // CHECK23-NEXT:    store i32 [[ADD18]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
24581 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24582 // CHECK23:       omp.body.continue:
24583 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24584 // CHECK23:       omp.inner.for.inc:
24585 // CHECK23-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
24586 // CHECK23-NEXT:    [[ADD19:%.*]] = add i32 [[TMP24]], 1
24587 // CHECK23-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
24588 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
24589 // CHECK23:       omp.inner.for.end:
24590 // CHECK23-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24591 // CHECK23:       omp.loop.exit:
24592 // CHECK23-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24593 // CHECK23-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
24594 // CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
24595 // CHECK23-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24596 // CHECK23-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
24597 // CHECK23-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24598 // CHECK23:       .omp.final.then:
24599 // CHECK23-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24600 // CHECK23-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
24601 // CHECK23-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
24602 // CHECK23-NEXT:    [[SUB20:%.*]] = sub i32 [[TMP30]], [[TMP31]]
24603 // CHECK23-NEXT:    [[SUB21:%.*]] = sub i32 [[SUB20]], 1
24604 // CHECK23-NEXT:    [[ADD22:%.*]] = add i32 [[SUB21]], 1
24605 // CHECK23-NEXT:    [[DIV23:%.*]] = udiv i32 [[ADD22]], 1
24606 // CHECK23-NEXT:    [[MUL24:%.*]] = mul i32 [[DIV23]], 1
24607 // CHECK23-NEXT:    [[ADD25:%.*]] = add i32 [[TMP29]], [[MUL24]]
24608 // CHECK23-NEXT:    store i32 [[ADD25]], i32* [[I6]], align 4
24609 // CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24610 // CHECK23:       .omp.final.done:
24611 // CHECK23-NEXT:    br label [[OMP_PRECOND_END]]
24612 // CHECK23:       omp.precond.end:
24613 // CHECK23-NEXT:    ret void
24614 //
24615 //
24616 // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
24617 // CHECK23-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
24618 // CHECK23-NEXT:  entry:
24619 // CHECK23-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
24620 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
24621 // CHECK23-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
24622 // CHECK23-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
24623 // CHECK23-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
24624 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
24625 // CHECK23-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
24626 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
24627 // CHECK23-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
24628 // CHECK23-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
24629 // CHECK23-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
24630 // CHECK23-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
24631 // CHECK23-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
24632 // CHECK23-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
24633 // CHECK23-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
24634 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
24635 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
24636 // CHECK23-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
24637 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
24638 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
24639 // CHECK23-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
24640 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
24641 // CHECK23-NEXT:    [[TMP6:%.*]] = load i8, i8* [[CONV]], align 1
24642 // CHECK23-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
24643 // CHECK23-NEXT:    [[CONV3:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8*
24644 // CHECK23-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
24645 // CHECK23-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV3]], align 1
24646 // CHECK23-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
24647 // CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]], i32 [[TMP7]])
24648 // CHECK23-NEXT:    ret void
24649 //
24650 //
24651 // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..5
24652 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
24653 // CHECK23-NEXT:  entry:
24654 // CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24655 // CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24656 // CHECK23-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
24657 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
24658 // CHECK23-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
24659 // CHECK23-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
24660 // CHECK23-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
24661 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
24662 // CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24663 // CHECK23-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24664 // CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24665 // CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24666 // CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24667 // CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24668 // CHECK23-NEXT:    [[I:%.*]] = alloca i32, align 4
24669 // CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24670 // CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24671 // CHECK23-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
24672 // CHECK23-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
24673 // CHECK23-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
24674 // CHECK23-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
24675 // CHECK23-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
24676 // CHECK23-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
24677 // CHECK23-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
24678 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
24679 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
24680 // CHECK23-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
24681 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
24682 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24683 // CHECK23-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
24684 // CHECK23-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24685 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24686 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24687 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
24688 // CHECK23-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24689 // CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24690 // CHECK23-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
24691 // CHECK23-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24692 // CHECK23:       cond.true:
24693 // CHECK23-NEXT:    br label [[COND_END:%.*]]
24694 // CHECK23:       cond.false:
24695 // CHECK23-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24696 // CHECK23-NEXT:    br label [[COND_END]]
24697 // CHECK23:       cond.end:
24698 // CHECK23-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
24699 // CHECK23-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24700 // CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24701 // CHECK23-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
24702 // CHECK23-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV]], align 1
24703 // CHECK23-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1
24704 // CHECK23-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
24705 // CHECK23:       omp_if.then:
24706 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24707 // CHECK23:       omp.inner.for.cond:
24708 // CHECK23-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
24709 // CHECK23-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !31
24710 // CHECK23-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
24711 // CHECK23-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24712 // CHECK23:       omp.inner.for.body:
24713 // CHECK23-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
24714 // CHECK23-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
24715 // CHECK23-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24716 // CHECK23-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !31
24717 // CHECK23-NEXT:    [[TMP13:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !31
24718 // CHECK23-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP13]] to double
24719 // CHECK23-NEXT:    [[ADD5:%.*]] = fadd double [[CONV4]], 1.500000e+00
24720 // CHECK23-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
24721 // CHECK23-NEXT:    store double [[ADD5]], double* [[A]], align 4, !llvm.access.group !31
24722 // CHECK23-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
24723 // CHECK23-NEXT:    [[TMP14:%.*]] = load double, double* [[A6]], align 4, !llvm.access.group !31
24724 // CHECK23-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
24725 // CHECK23-NEXT:    store double [[INC]], double* [[A6]], align 4, !llvm.access.group !31
24726 // CHECK23-NEXT:    [[CONV7:%.*]] = fptosi double [[INC]] to i16
24727 // CHECK23-NEXT:    [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]]
24728 // CHECK23-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP15]]
24729 // CHECK23-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
24730 // CHECK23-NEXT:    store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !31
24731 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24732 // CHECK23:       omp.body.continue:
24733 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24734 // CHECK23:       omp.inner.for.inc:
24735 // CHECK23-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
24736 // CHECK23-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP16]], 1
24737 // CHECK23-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
24738 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
24739 // CHECK23:       omp.inner.for.end:
24740 // CHECK23-NEXT:    br label [[OMP_IF_END:%.*]]
24741 // CHECK23:       omp_if.else:
24742 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
24743 // CHECK23:       omp.inner.for.cond10:
24744 // CHECK23-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24745 // CHECK23-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24746 // CHECK23-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
24747 // CHECK23-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END26:%.*]]
24748 // CHECK23:       omp.inner.for.body12:
24749 // CHECK23-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24750 // CHECK23-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP19]], 1
24751 // CHECK23-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
24752 // CHECK23-NEXT:    store i32 [[ADD14]], i32* [[I]], align 4
24753 // CHECK23-NEXT:    [[TMP20:%.*]] = load i32, i32* [[B_ADDR]], align 4
24754 // CHECK23-NEXT:    [[CONV15:%.*]] = sitofp i32 [[TMP20]] to double
24755 // CHECK23-NEXT:    [[ADD16:%.*]] = fadd double [[CONV15]], 1.500000e+00
24756 // CHECK23-NEXT:    [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
24757 // CHECK23-NEXT:    store double [[ADD16]], double* [[A17]], align 4
24758 // CHECK23-NEXT:    [[A18:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
24759 // CHECK23-NEXT:    [[TMP21:%.*]] = load double, double* [[A18]], align 4
24760 // CHECK23-NEXT:    [[INC19:%.*]] = fadd double [[TMP21]], 1.000000e+00
24761 // CHECK23-NEXT:    store double [[INC19]], double* [[A18]], align 4
24762 // CHECK23-NEXT:    [[CONV20:%.*]] = fptosi double [[INC19]] to i16
24763 // CHECK23-NEXT:    [[TMP22:%.*]] = mul nsw i32 1, [[TMP2]]
24764 // CHECK23-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP22]]
24765 // CHECK23-NEXT:    [[ARRAYIDX22:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX21]], i32 1
24766 // CHECK23-NEXT:    store i16 [[CONV20]], i16* [[ARRAYIDX22]], align 2
24767 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE23:%.*]]
24768 // CHECK23:       omp.body.continue23:
24769 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC24:%.*]]
24770 // CHECK23:       omp.inner.for.inc24:
24771 // CHECK23-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
24772 // CHECK23-NEXT:    [[ADD25:%.*]] = add nsw i32 [[TMP23]], 1
24773 // CHECK23-NEXT:    store i32 [[ADD25]], i32* [[DOTOMP_IV]], align 4
24774 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP34:![0-9]+]]
24775 // CHECK23:       omp.inner.for.end26:
24776 // CHECK23-NEXT:    br label [[OMP_IF_END]]
24777 // CHECK23:       omp_if.end:
24778 // CHECK23-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24779 // CHECK23:       omp.loop.exit:
24780 // CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
24781 // CHECK23-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24782 // CHECK23-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
24783 // CHECK23-NEXT:    br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24784 // CHECK23:       .omp.final.then:
24785 // CHECK23-NEXT:    store i32 10, i32* [[I]], align 4
24786 // CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24787 // CHECK23:       .omp.final.done:
24788 // CHECK23-NEXT:    ret void
24789 //
24790 //
24791 // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
24792 // CHECK23-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
24793 // CHECK23-NEXT:  entry:
24794 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
24795 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
24796 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
24797 // CHECK23-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
24798 // CHECK23-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
24799 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
24800 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
24801 // CHECK23-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
24802 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
24803 // CHECK23-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
24804 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
24805 // CHECK23-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
24806 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
24807 // CHECK23-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
24808 // CHECK23-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
24809 // CHECK23-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
24810 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
24811 // CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
24812 // CHECK23-NEXT:    ret void
24813 //
24814 //
24815 // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..6
24816 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
24817 // CHECK23-NEXT:  entry:
24818 // CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24819 // CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24820 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
24821 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
24822 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
24823 // CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24824 // CHECK23-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24825 // CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24826 // CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24827 // CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24828 // CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24829 // CHECK23-NEXT:    [[I:%.*]] = alloca i32, align 4
24830 // CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24831 // CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24832 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
24833 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
24834 // CHECK23-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
24835 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
24836 // CHECK23-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
24837 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24838 // CHECK23-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
24839 // CHECK23-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24840 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24841 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24842 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
24843 // CHECK23-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24844 // CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24845 // CHECK23-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
24846 // CHECK23-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24847 // CHECK23:       cond.true:
24848 // CHECK23-NEXT:    br label [[COND_END:%.*]]
24849 // CHECK23:       cond.false:
24850 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24851 // CHECK23-NEXT:    br label [[COND_END]]
24852 // CHECK23:       cond.end:
24853 // CHECK23-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
24854 // CHECK23-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24855 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24856 // CHECK23-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
24857 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24858 // CHECK23:       omp.inner.for.cond:
24859 // CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
24860 // CHECK23-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
24861 // CHECK23-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
24862 // CHECK23-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24863 // CHECK23:       omp.inner.for.body:
24864 // CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
24865 // CHECK23-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
24866 // CHECK23-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24867 // CHECK23-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !36
24868 // CHECK23-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !36
24869 // CHECK23-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
24870 // CHECK23-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !36
24871 // CHECK23-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !36
24872 // CHECK23-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
24873 // CHECK23-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
24874 // CHECK23-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
24875 // CHECK23-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !36
24876 // CHECK23-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
24877 // CHECK23-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !36
24878 // CHECK23-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
24879 // CHECK23-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !36
24880 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24881 // CHECK23:       omp.body.continue:
24882 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24883 // CHECK23:       omp.inner.for.inc:
24884 // CHECK23-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
24885 // CHECK23-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP12]], 1
24886 // CHECK23-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
24887 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
24888 // CHECK23:       omp.inner.for.end:
24889 // CHECK23-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24890 // CHECK23:       omp.loop.exit:
24891 // CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
24892 // CHECK23-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24893 // CHECK23-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
24894 // CHECK23-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24895 // CHECK23:       .omp.final.then:
24896 // CHECK23-NEXT:    store i32 10, i32* [[I]], align 4
24897 // CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24898 // CHECK23:       .omp.final.done:
24899 // CHECK23-NEXT:    ret void
24900 //
24901 //
24902 // CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97
24903 // CHECK24-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], i32 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] {
24904 // CHECK24-NEXT:  entry:
24905 // CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
24906 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
24907 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4
24908 // CHECK24-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
24909 // CHECK24-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
24910 // CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
24911 // CHECK24-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
24912 // CHECK24-NEXT:    store i32 [[DOTCAPTURE_EXPR_1]], i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
24913 // CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
24914 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
24915 // CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR2]], align 4
24916 // CHECK24-NEXT:    call void @__kmpc_push_num_teams(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
24917 // CHECK24-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
24918 // CHECK24-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
24919 // CHECK24-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
24920 // CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
24921 // CHECK24-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP4]])
24922 // CHECK24-NEXT:    ret void
24923 //
24924 //
24925 // CHECK24-LABEL: define {{[^@]+}}@.omp_outlined.
24926 // CHECK24-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
24927 // CHECK24-NEXT:  entry:
24928 // CHECK24-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
24929 // CHECK24-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
24930 // CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
24931 // CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24932 // CHECK24-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24933 // CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24934 // CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24935 // CHECK24-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
24936 // CHECK24-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
24937 // CHECK24-NEXT:    [[I:%.*]] = alloca i32, align 4
24938 // CHECK24-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
24939 // CHECK24-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
24940 // CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
24941 // CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
24942 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24943 // CHECK24-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
24944 // CHECK24-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
24945 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
24946 // CHECK24-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
24947 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
24948 // CHECK24-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
24949 // CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24950 // CHECK24-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
24951 // CHECK24-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
24952 // CHECK24:       cond.true:
24953 // CHECK24-NEXT:    br label [[COND_END:%.*]]
24954 // CHECK24:       cond.false:
24955 // CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
24956 // CHECK24-NEXT:    br label [[COND_END]]
24957 // CHECK24:       cond.end:
24958 // CHECK24-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
24959 // CHECK24-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
24960 // CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24961 // CHECK24-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
24962 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24963 // CHECK24:       omp.inner.for.cond:
24964 // CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
24965 // CHECK24-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
24966 // CHECK24-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
24967 // CHECK24-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24968 // CHECK24:       omp.inner.for.body:
24969 // CHECK24-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
24970 // CHECK24-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
24971 // CHECK24-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
24972 // CHECK24-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
24973 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24974 // CHECK24:       omp.body.continue:
24975 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24976 // CHECK24:       omp.inner.for.inc:
24977 // CHECK24-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
24978 // CHECK24-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
24979 // CHECK24-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
24980 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
24981 // CHECK24:       omp.inner.for.end:
24982 // CHECK24-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
24983 // CHECK24:       omp.loop.exit:
24984 // CHECK24-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
24985 // CHECK24-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
24986 // CHECK24-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
24987 // CHECK24-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
24988 // CHECK24:       .omp.final.then:
24989 // CHECK24-NEXT:    store i32 10, i32* [[I]], align 4
24990 // CHECK24-NEXT:    br label [[DOTOMP_FINAL_DONE]]
24991 // CHECK24:       .omp.final.done:
24992 // CHECK24-NEXT:    ret void
24993 //
24994 //
24995 // CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111
24996 // CHECK24-SAME: (i32 noundef [[AA:%.*]]) #[[ATTR0]] {
24997 // CHECK24-NEXT:  entry:
24998 // CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
24999 // CHECK24-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
25000 // CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
25001 // CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
25002 // CHECK24-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
25003 // CHECK24-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
25004 // CHECK24-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
25005 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
25006 // CHECK24-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]])
25007 // CHECK24-NEXT:    ret void
25008 //
25009 //
25010 // CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..1
25011 // CHECK24-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] {
25012 // CHECK24-NEXT:  entry:
25013 // CHECK24-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25014 // CHECK24-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25015 // CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
25016 // CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25017 // CHECK24-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25018 // CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25019 // CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25020 // CHECK24-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25021 // CHECK24-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25022 // CHECK24-NEXT:    [[I:%.*]] = alloca i32, align 4
25023 // CHECK24-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25024 // CHECK24-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25025 // CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
25026 // CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
25027 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25028 // CHECK24-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
25029 // CHECK24-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25030 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25031 // CHECK24-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25032 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
25033 // CHECK24-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25034 // CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25035 // CHECK24-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
25036 // CHECK24-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25037 // CHECK24:       cond.true:
25038 // CHECK24-NEXT:    br label [[COND_END:%.*]]
25039 // CHECK24:       cond.false:
25040 // CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25041 // CHECK24-NEXT:    br label [[COND_END]]
25042 // CHECK24:       cond.end:
25043 // CHECK24-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
25044 // CHECK24-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
25045 // CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25046 // CHECK24-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
25047 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25048 // CHECK24:       omp.inner.for.cond:
25049 // CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
25050 // CHECK24-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
25051 // CHECK24-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
25052 // CHECK24-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25053 // CHECK24:       omp.inner.for.body:
25054 // CHECK24-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
25055 // CHECK24-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
25056 // CHECK24-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
25057 // CHECK24-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !19
25058 // CHECK24-NEXT:    [[TMP8:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !19
25059 // CHECK24-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP8]] to i32
25060 // CHECK24-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV2]], 1
25061 // CHECK24-NEXT:    [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16
25062 // CHECK24-NEXT:    store i16 [[CONV4]], i16* [[CONV]], align 2, !llvm.access.group !19
25063 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25064 // CHECK24:       omp.body.continue:
25065 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25066 // CHECK24:       omp.inner.for.inc:
25067 // CHECK24-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
25068 // CHECK24-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP9]], 1
25069 // CHECK24-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
25070 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
25071 // CHECK24:       omp.inner.for.end:
25072 // CHECK24-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25073 // CHECK24:       omp.loop.exit:
25074 // CHECK24-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
25075 // CHECK24-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25076 // CHECK24-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
25077 // CHECK24-NEXT:    br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25078 // CHECK24:       .omp.final.then:
25079 // CHECK24-NEXT:    store i32 10, i32* [[I]], align 4
25080 // CHECK24-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25081 // CHECK24:       .omp.final.done:
25082 // CHECK24-NEXT:    ret void
25083 //
25084 //
25085 // CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118
25086 // CHECK24-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR0]] {
25087 // CHECK24-NEXT:  entry:
25088 // CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
25089 // CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
25090 // CHECK24-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
25091 // CHECK24-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
25092 // CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
25093 // CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
25094 // CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
25095 // CHECK24-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
25096 // CHECK24-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
25097 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
25098 // CHECK24-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
25099 // CHECK24-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
25100 // CHECK24-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
25101 // CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
25102 // CHECK24-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
25103 // CHECK24-NEXT:    ret void
25104 //
25105 //
25106 // CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..2
25107 // CHECK24-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] {
25108 // CHECK24-NEXT:  entry:
25109 // CHECK24-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25110 // CHECK24-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25111 // CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
25112 // CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
25113 // CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25114 // CHECK24-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25115 // CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25116 // CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25117 // CHECK24-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25118 // CHECK24-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25119 // CHECK24-NEXT:    [[I:%.*]] = alloca i32, align 4
25120 // CHECK24-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25121 // CHECK24-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25122 // CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
25123 // CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
25124 // CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
25125 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25126 // CHECK24-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
25127 // CHECK24-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25128 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25129 // CHECK24-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25130 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
25131 // CHECK24-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25132 // CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25133 // CHECK24-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
25134 // CHECK24-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25135 // CHECK24:       cond.true:
25136 // CHECK24-NEXT:    br label [[COND_END:%.*]]
25137 // CHECK24:       cond.false:
25138 // CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25139 // CHECK24-NEXT:    br label [[COND_END]]
25140 // CHECK24:       cond.end:
25141 // CHECK24-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
25142 // CHECK24-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
25143 // CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25144 // CHECK24-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
25145 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25146 // CHECK24:       omp.inner.for.cond:
25147 // CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
25148 // CHECK24-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
25149 // CHECK24-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
25150 // CHECK24-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25151 // CHECK24:       omp.inner.for.body:
25152 // CHECK24-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
25153 // CHECK24-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
25154 // CHECK24-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
25155 // CHECK24-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !22
25156 // CHECK24-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !22
25157 // CHECK24-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
25158 // CHECK24-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !22
25159 // CHECK24-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !22
25160 // CHECK24-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP9]] to i32
25161 // CHECK24-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
25162 // CHECK24-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
25163 // CHECK24-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !22
25164 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25165 // CHECK24:       omp.body.continue:
25166 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25167 // CHECK24:       omp.inner.for.inc:
25168 // CHECK24-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
25169 // CHECK24-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1
25170 // CHECK24-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
25171 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
25172 // CHECK24:       omp.inner.for.end:
25173 // CHECK24-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25174 // CHECK24:       omp.loop.exit:
25175 // CHECK24-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
25176 // CHECK24-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25177 // CHECK24-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
25178 // CHECK24-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25179 // CHECK24:       .omp.final.then:
25180 // CHECK24-NEXT:    store i32 10, i32* [[I]], align 4
25181 // CHECK24-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25182 // CHECK24:       .omp.final.done:
25183 // CHECK24-NEXT:    ret void
25184 //
25185 //
25186 // CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142
25187 // CHECK24-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR0]] {
25188 // CHECK24-NEXT:  entry:
25189 // CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
25190 // CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
25191 // CHECK24-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25192 // CHECK24-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
25193 // CHECK24-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
25194 // CHECK24-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
25195 // CHECK24-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
25196 // CHECK24-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
25197 // CHECK24-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
25198 // CHECK24-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
25199 // CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
25200 // CHECK24-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
25201 // CHECK24-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25202 // CHECK24-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
25203 // CHECK24-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
25204 // CHECK24-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
25205 // CHECK24-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
25206 // CHECK24-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
25207 // CHECK24-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
25208 // CHECK24-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
25209 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25210 // CHECK24-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
25211 // CHECK24-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
25212 // CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
25213 // CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
25214 // CHECK24-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
25215 // CHECK24-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
25216 // CHECK24-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
25217 // CHECK24-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
25218 // CHECK24-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
25219 // CHECK24-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 9, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]])
25220 // CHECK24-NEXT:    ret void
25221 //
25222 //
25223 // CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..3
25224 // CHECK24-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]]) #[[ATTR1]] {
25225 // CHECK24-NEXT:  entry:
25226 // CHECK24-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25227 // CHECK24-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25228 // CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
25229 // CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
25230 // CHECK24-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25231 // CHECK24-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
25232 // CHECK24-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
25233 // CHECK24-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
25234 // CHECK24-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
25235 // CHECK24-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
25236 // CHECK24-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
25237 // CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25238 // CHECK24-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25239 // CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25240 // CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25241 // CHECK24-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25242 // CHECK24-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25243 // CHECK24-NEXT:    [[I:%.*]] = alloca i32, align 4
25244 // CHECK24-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25245 // CHECK24-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25246 // CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
25247 // CHECK24-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
25248 // CHECK24-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25249 // CHECK24-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
25250 // CHECK24-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
25251 // CHECK24-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
25252 // CHECK24-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
25253 // CHECK24-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
25254 // CHECK24-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
25255 // CHECK24-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
25256 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25257 // CHECK24-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
25258 // CHECK24-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
25259 // CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
25260 // CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
25261 // CHECK24-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
25262 // CHECK24-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
25263 // CHECK24-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 0
25264 // CHECK24-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
25265 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25266 // CHECK24-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
25267 // CHECK24-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25268 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25269 // CHECK24-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25270 // CHECK24-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
25271 // CHECK24-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25272 // CHECK24-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25273 // CHECK24-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9
25274 // CHECK24-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25275 // CHECK24:       cond.true:
25276 // CHECK24-NEXT:    br label [[COND_END:%.*]]
25277 // CHECK24:       cond.false:
25278 // CHECK24-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25279 // CHECK24-NEXT:    br label [[COND_END]]
25280 // CHECK24:       cond.end:
25281 // CHECK24-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
25282 // CHECK24-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
25283 // CHECK24-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25284 // CHECK24-NEXT:    store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
25285 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25286 // CHECK24:       omp.inner.for.cond:
25287 // CHECK24-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
25288 // CHECK24-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
25289 // CHECK24-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
25290 // CHECK24-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25291 // CHECK24:       omp.inner.for.body:
25292 // CHECK24-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
25293 // CHECK24-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
25294 // CHECK24-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
25295 // CHECK24-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !25
25296 // CHECK24-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !25
25297 // CHECK24-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
25298 // CHECK24-NEXT:    store i32 [[ADD6]], i32* [[A_ADDR]], align 4, !llvm.access.group !25
25299 // CHECK24-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
25300 // CHECK24-NEXT:    [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !25
25301 // CHECK24-NEXT:    [[CONV:%.*]] = fpext float [[TMP17]] to double
25302 // CHECK24-NEXT:    [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00
25303 // CHECK24-NEXT:    [[CONV8:%.*]] = fptrunc double [[ADD7]] to float
25304 // CHECK24-NEXT:    store float [[CONV8]], float* [[ARRAYIDX]], align 4, !llvm.access.group !25
25305 // CHECK24-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
25306 // CHECK24-NEXT:    [[TMP18:%.*]] = load float, float* [[ARRAYIDX9]], align 4, !llvm.access.group !25
25307 // CHECK24-NEXT:    [[CONV10:%.*]] = fpext float [[TMP18]] to double
25308 // CHECK24-NEXT:    [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00
25309 // CHECK24-NEXT:    [[CONV12:%.*]] = fptrunc double [[ADD11]] to float
25310 // CHECK24-NEXT:    store float [[CONV12]], float* [[ARRAYIDX9]], align 4, !llvm.access.group !25
25311 // CHECK24-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
25312 // CHECK24-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX13]], i32 0, i32 2
25313 // CHECK24-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX14]], align 8, !llvm.access.group !25
25314 // CHECK24-NEXT:    [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00
25315 // CHECK24-NEXT:    store double [[ADD15]], double* [[ARRAYIDX14]], align 8, !llvm.access.group !25
25316 // CHECK24-NEXT:    [[TMP20:%.*]] = mul nsw i32 1, [[TMP5]]
25317 // CHECK24-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP20]]
25318 // CHECK24-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX16]], i32 3
25319 // CHECK24-NEXT:    [[TMP21:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !25
25320 // CHECK24-NEXT:    [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00
25321 // CHECK24-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !25
25322 // CHECK24-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
25323 // CHECK24-NEXT:    [[TMP22:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !25
25324 // CHECK24-NEXT:    [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1
25325 // CHECK24-NEXT:    store i64 [[ADD19]], i64* [[X]], align 4, !llvm.access.group !25
25326 // CHECK24-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
25327 // CHECK24-NEXT:    [[TMP23:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !25
25328 // CHECK24-NEXT:    [[CONV20:%.*]] = sext i8 [[TMP23]] to i32
25329 // CHECK24-NEXT:    [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1
25330 // CHECK24-NEXT:    [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8
25331 // CHECK24-NEXT:    store i8 [[CONV22]], i8* [[Y]], align 4, !llvm.access.group !25
25332 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25333 // CHECK24:       omp.body.continue:
25334 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25335 // CHECK24:       omp.inner.for.inc:
25336 // CHECK24-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
25337 // CHECK24-NEXT:    [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1
25338 // CHECK24-NEXT:    store i32 [[ADD23]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
25339 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
25340 // CHECK24:       omp.inner.for.end:
25341 // CHECK24-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25342 // CHECK24:       omp.loop.exit:
25343 // CHECK24-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]])
25344 // CHECK24-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25345 // CHECK24-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
25346 // CHECK24-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25347 // CHECK24:       .omp.final.then:
25348 // CHECK24-NEXT:    store i32 10, i32* [[I]], align 4
25349 // CHECK24-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25350 // CHECK24:       .omp.final.done:
25351 // CHECK24-NEXT:    ret void
25352 //
25353 //
25354 // CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197
25355 // CHECK24-SAME: (i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
25356 // CHECK24-NEXT:  entry:
25357 // CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
25358 // CHECK24-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25359 // CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
25360 // CHECK24-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
25361 // CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
25362 // CHECK24-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
25363 // CHECK24-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
25364 // CHECK24-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
25365 // CHECK24-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
25366 // CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
25367 // CHECK24-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25368 // CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
25369 // CHECK24-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
25370 // CHECK24-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
25371 // CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
25372 // CHECK24-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
25373 // CHECK24-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
25374 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
25375 // CHECK24-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
25376 // CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
25377 // CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
25378 // CHECK24-NEXT:    store i32 [[TMP3]], i32* [[N_CASTED]], align 4
25379 // CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_CASTED]], align 4
25380 // CHECK24-NEXT:    [[TMP5:%.*]] = load i16, i16* [[CONV]], align 2
25381 // CHECK24-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
25382 // CHECK24-NEXT:    store i16 [[TMP5]], i16* [[CONV2]], align 2
25383 // CHECK24-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AA_CASTED]], align 4
25384 // CHECK24-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV1]], align 1
25385 // CHECK24-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
25386 // CHECK24-NEXT:    store i8 [[TMP7]], i8* [[CONV3]], align 1
25387 // CHECK24-NEXT:    [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
25388 // CHECK24-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]])
25389 // CHECK24-NEXT:    ret void
25390 //
25391 //
25392 // CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..4
25393 // CHECK24-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
25394 // CHECK24-NEXT:  entry:
25395 // CHECK24-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25396 // CHECK24-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25397 // CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
25398 // CHECK24-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25399 // CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
25400 // CHECK24-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
25401 // CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
25402 // CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25403 // CHECK24-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25404 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25405 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
25406 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
25407 // CHECK24-NEXT:    [[I:%.*]] = alloca i32, align 4
25408 // CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25409 // CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25410 // CHECK24-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25411 // CHECK24-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25412 // CHECK24-NEXT:    [[I6:%.*]] = alloca i32, align 4
25413 // CHECK24-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25414 // CHECK24-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25415 // CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
25416 // CHECK24-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25417 // CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
25418 // CHECK24-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
25419 // CHECK24-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
25420 // CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
25421 // CHECK24-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
25422 // CHECK24-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
25423 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
25424 // CHECK24-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
25425 // CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25426 // CHECK24-NEXT:    store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
25427 // CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
25428 // CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25429 // CHECK24-NEXT:    [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
25430 // CHECK24-NEXT:    [[SUB4:%.*]] = sub i32 [[SUB]], 1
25431 // CHECK24-NEXT:    [[ADD:%.*]] = add i32 [[SUB4]], 1
25432 // CHECK24-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
25433 // CHECK24-NEXT:    [[SUB5:%.*]] = sub i32 [[DIV]], 1
25434 // CHECK24-NEXT:    store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_3]], align 4
25435 // CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25436 // CHECK24-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
25437 // CHECK24-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25438 // CHECK24-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
25439 // CHECK24-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
25440 // CHECK24-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
25441 // CHECK24:       omp.precond.then:
25442 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25443 // CHECK24-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
25444 // CHECK24-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_UB]], align 4
25445 // CHECK24-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25446 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25447 // CHECK24-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25448 // CHECK24-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
25449 // CHECK24-NEXT:    call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25450 // CHECK24-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25451 // CHECK24-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
25452 // CHECK24-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
25453 // CHECK24-NEXT:    br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25454 // CHECK24:       cond.true:
25455 // CHECK24-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
25456 // CHECK24-NEXT:    br label [[COND_END:%.*]]
25457 // CHECK24:       cond.false:
25458 // CHECK24-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25459 // CHECK24-NEXT:    br label [[COND_END]]
25460 // CHECK24:       cond.end:
25461 // CHECK24-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
25462 // CHECK24-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
25463 // CHECK24-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25464 // CHECK24-NEXT:    store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
25465 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25466 // CHECK24:       omp.inner.for.cond:
25467 // CHECK24-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
25468 // CHECK24-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28
25469 // CHECK24-NEXT:    [[ADD8:%.*]] = add i32 [[TMP17]], 1
25470 // CHECK24-NEXT:    [[CMP9:%.*]] = icmp ult i32 [[TMP16]], [[ADD8]]
25471 // CHECK24-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25472 // CHECK24:       omp.inner.for.body:
25473 // CHECK24-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !28
25474 // CHECK24-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
25475 // CHECK24-NEXT:    [[MUL:%.*]] = mul i32 [[TMP19]], 1
25476 // CHECK24-NEXT:    [[ADD10:%.*]] = add i32 [[TMP18]], [[MUL]]
25477 // CHECK24-NEXT:    store i32 [[ADD10]], i32* [[I6]], align 4, !llvm.access.group !28
25478 // CHECK24-NEXT:    [[TMP20:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !28
25479 // CHECK24-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP20]], 1
25480 // CHECK24-NEXT:    store i32 [[ADD11]], i32* [[A_ADDR]], align 4, !llvm.access.group !28
25481 // CHECK24-NEXT:    [[TMP21:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !28
25482 // CHECK24-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP21]] to i32
25483 // CHECK24-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
25484 // CHECK24-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
25485 // CHECK24-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !28
25486 // CHECK24-NEXT:    [[TMP22:%.*]] = load i8, i8* [[CONV1]], align 1, !llvm.access.group !28
25487 // CHECK24-NEXT:    [[CONV15:%.*]] = sext i8 [[TMP22]] to i32
25488 // CHECK24-NEXT:    [[ADD16:%.*]] = add nsw i32 [[CONV15]], 1
25489 // CHECK24-NEXT:    [[CONV17:%.*]] = trunc i32 [[ADD16]] to i8
25490 // CHECK24-NEXT:    store i8 [[CONV17]], i8* [[CONV1]], align 1, !llvm.access.group !28
25491 // CHECK24-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
25492 // CHECK24-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
25493 // CHECK24-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP23]], 1
25494 // CHECK24-NEXT:    store i32 [[ADD18]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
25495 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25496 // CHECK24:       omp.body.continue:
25497 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25498 // CHECK24:       omp.inner.for.inc:
25499 // CHECK24-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
25500 // CHECK24-NEXT:    [[ADD19:%.*]] = add i32 [[TMP24]], 1
25501 // CHECK24-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
25502 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
25503 // CHECK24:       omp.inner.for.end:
25504 // CHECK24-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25505 // CHECK24:       omp.loop.exit:
25506 // CHECK24-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25507 // CHECK24-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
25508 // CHECK24-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
25509 // CHECK24-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25510 // CHECK24-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
25511 // CHECK24-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25512 // CHECK24:       .omp.final.then:
25513 // CHECK24-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25514 // CHECK24-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
25515 // CHECK24-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
25516 // CHECK24-NEXT:    [[SUB20:%.*]] = sub i32 [[TMP30]], [[TMP31]]
25517 // CHECK24-NEXT:    [[SUB21:%.*]] = sub i32 [[SUB20]], 1
25518 // CHECK24-NEXT:    [[ADD22:%.*]] = add i32 [[SUB21]], 1
25519 // CHECK24-NEXT:    [[DIV23:%.*]] = udiv i32 [[ADD22]], 1
25520 // CHECK24-NEXT:    [[MUL24:%.*]] = mul i32 [[DIV23]], 1
25521 // CHECK24-NEXT:    [[ADD25:%.*]] = add i32 [[TMP29]], [[MUL24]]
25522 // CHECK24-NEXT:    store i32 [[ADD25]], i32* [[I6]], align 4
25523 // CHECK24-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25524 // CHECK24:       .omp.final.done:
25525 // CHECK24-NEXT:    br label [[OMP_PRECOND_END]]
25526 // CHECK24:       omp.precond.end:
25527 // CHECK24-NEXT:    ret void
25528 //
25529 //
25530 // CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215
25531 // CHECK24-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
25532 // CHECK24-NEXT:  entry:
25533 // CHECK24-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
25534 // CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
25535 // CHECK24-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25536 // CHECK24-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
25537 // CHECK24-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
25538 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
25539 // CHECK24-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
25540 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
25541 // CHECK24-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
25542 // CHECK24-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
25543 // CHECK24-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25544 // CHECK24-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
25545 // CHECK24-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
25546 // CHECK24-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
25547 // CHECK24-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
25548 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25549 // CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
25550 // CHECK24-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
25551 // CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
25552 // CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
25553 // CHECK24-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
25554 // CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
25555 // CHECK24-NEXT:    [[TMP6:%.*]] = load i8, i8* [[CONV]], align 1
25556 // CHECK24-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
25557 // CHECK24-NEXT:    [[CONV3:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8*
25558 // CHECK24-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
25559 // CHECK24-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV3]], align 1
25560 // CHECK24-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
25561 // CHECK24-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]], i32 [[TMP7]])
25562 // CHECK24-NEXT:    ret void
25563 //
25564 //
25565 // CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..5
25566 // CHECK24-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
25567 // CHECK24-NEXT:  entry:
25568 // CHECK24-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25569 // CHECK24-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25570 // CHECK24-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
25571 // CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
25572 // CHECK24-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
25573 // CHECK24-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
25574 // CHECK24-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
25575 // CHECK24-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
25576 // CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25577 // CHECK24-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25578 // CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25579 // CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25580 // CHECK24-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25581 // CHECK24-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25582 // CHECK24-NEXT:    [[I:%.*]] = alloca i32, align 4
25583 // CHECK24-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25584 // CHECK24-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25585 // CHECK24-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
25586 // CHECK24-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
25587 // CHECK24-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
25588 // CHECK24-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
25589 // CHECK24-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
25590 // CHECK24-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
25591 // CHECK24-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
25592 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
25593 // CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
25594 // CHECK24-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
25595 // CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
25596 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25597 // CHECK24-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
25598 // CHECK24-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25599 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25600 // CHECK24-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25601 // CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
25602 // CHECK24-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25603 // CHECK24-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25604 // CHECK24-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9
25605 // CHECK24-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25606 // CHECK24:       cond.true:
25607 // CHECK24-NEXT:    br label [[COND_END:%.*]]
25608 // CHECK24:       cond.false:
25609 // CHECK24-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25610 // CHECK24-NEXT:    br label [[COND_END]]
25611 // CHECK24:       cond.end:
25612 // CHECK24-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
25613 // CHECK24-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
25614 // CHECK24-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25615 // CHECK24-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
25616 // CHECK24-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV]], align 1
25617 // CHECK24-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1
25618 // CHECK24-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
25619 // CHECK24:       omp_if.then:
25620 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25621 // CHECK24:       omp.inner.for.cond:
25622 // CHECK24-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
25623 // CHECK24-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !31
25624 // CHECK24-NEXT:    [[CMP3:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
25625 // CHECK24-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25626 // CHECK24:       omp.inner.for.body:
25627 // CHECK24-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
25628 // CHECK24-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
25629 // CHECK24-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
25630 // CHECK24-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !31
25631 // CHECK24-NEXT:    [[TMP13:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !31
25632 // CHECK24-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP13]] to double
25633 // CHECK24-NEXT:    [[ADD5:%.*]] = fadd double [[CONV4]], 1.500000e+00
25634 // CHECK24-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
25635 // CHECK24-NEXT:    store double [[ADD5]], double* [[A]], align 4, !llvm.access.group !31
25636 // CHECK24-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
25637 // CHECK24-NEXT:    [[TMP14:%.*]] = load double, double* [[A6]], align 4, !llvm.access.group !31
25638 // CHECK24-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
25639 // CHECK24-NEXT:    store double [[INC]], double* [[A6]], align 4, !llvm.access.group !31
25640 // CHECK24-NEXT:    [[CONV7:%.*]] = fptosi double [[INC]] to i16
25641 // CHECK24-NEXT:    [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]]
25642 // CHECK24-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP15]]
25643 // CHECK24-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
25644 // CHECK24-NEXT:    store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !31
25645 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25646 // CHECK24:       omp.body.continue:
25647 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25648 // CHECK24:       omp.inner.for.inc:
25649 // CHECK24-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
25650 // CHECK24-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP16]], 1
25651 // CHECK24-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
25652 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
25653 // CHECK24:       omp.inner.for.end:
25654 // CHECK24-NEXT:    br label [[OMP_IF_END:%.*]]
25655 // CHECK24:       omp_if.else:
25656 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
25657 // CHECK24:       omp.inner.for.cond10:
25658 // CHECK24-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
25659 // CHECK24-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25660 // CHECK24-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
25661 // CHECK24-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END26:%.*]]
25662 // CHECK24:       omp.inner.for.body12:
25663 // CHECK24-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
25664 // CHECK24-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP19]], 1
25665 // CHECK24-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
25666 // CHECK24-NEXT:    store i32 [[ADD14]], i32* [[I]], align 4
25667 // CHECK24-NEXT:    [[TMP20:%.*]] = load i32, i32* [[B_ADDR]], align 4
25668 // CHECK24-NEXT:    [[CONV15:%.*]] = sitofp i32 [[TMP20]] to double
25669 // CHECK24-NEXT:    [[ADD16:%.*]] = fadd double [[CONV15]], 1.500000e+00
25670 // CHECK24-NEXT:    [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
25671 // CHECK24-NEXT:    store double [[ADD16]], double* [[A17]], align 4
25672 // CHECK24-NEXT:    [[A18:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
25673 // CHECK24-NEXT:    [[TMP21:%.*]] = load double, double* [[A18]], align 4
25674 // CHECK24-NEXT:    [[INC19:%.*]] = fadd double [[TMP21]], 1.000000e+00
25675 // CHECK24-NEXT:    store double [[INC19]], double* [[A18]], align 4
25676 // CHECK24-NEXT:    [[CONV20:%.*]] = fptosi double [[INC19]] to i16
25677 // CHECK24-NEXT:    [[TMP22:%.*]] = mul nsw i32 1, [[TMP2]]
25678 // CHECK24-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP22]]
25679 // CHECK24-NEXT:    [[ARRAYIDX22:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX21]], i32 1
25680 // CHECK24-NEXT:    store i16 [[CONV20]], i16* [[ARRAYIDX22]], align 2
25681 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE23:%.*]]
25682 // CHECK24:       omp.body.continue23:
25683 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC24:%.*]]
25684 // CHECK24:       omp.inner.for.inc24:
25685 // CHECK24-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
25686 // CHECK24-NEXT:    [[ADD25:%.*]] = add nsw i32 [[TMP23]], 1
25687 // CHECK24-NEXT:    store i32 [[ADD25]], i32* [[DOTOMP_IV]], align 4
25688 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP34:![0-9]+]]
25689 // CHECK24:       omp.inner.for.end26:
25690 // CHECK24-NEXT:    br label [[OMP_IF_END]]
25691 // CHECK24:       omp_if.end:
25692 // CHECK24-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25693 // CHECK24:       omp.loop.exit:
25694 // CHECK24-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
25695 // CHECK24-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25696 // CHECK24-NEXT:    [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
25697 // CHECK24-NEXT:    br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25698 // CHECK24:       .omp.final.then:
25699 // CHECK24-NEXT:    store i32 10, i32* [[I]], align 4
25700 // CHECK24-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25701 // CHECK24:       .omp.final.done:
25702 // CHECK24-NEXT:    ret void
25703 //
25704 //
25705 // CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180
25706 // CHECK24-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
25707 // CHECK24-NEXT:  entry:
25708 // CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
25709 // CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
25710 // CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
25711 // CHECK24-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
25712 // CHECK24-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
25713 // CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
25714 // CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
25715 // CHECK24-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
25716 // CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
25717 // CHECK24-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
25718 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
25719 // CHECK24-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
25720 // CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
25721 // CHECK24-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
25722 // CHECK24-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
25723 // CHECK24-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
25724 // CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
25725 // CHECK24-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
25726 // CHECK24-NEXT:    ret void
25727 //
25728 //
25729 // CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..6
25730 // CHECK24-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
25731 // CHECK24-NEXT:  entry:
25732 // CHECK24-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
25733 // CHECK24-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
25734 // CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
25735 // CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
25736 // CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
25737 // CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25738 // CHECK24-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25739 // CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25740 // CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25741 // CHECK24-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
25742 // CHECK24-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
25743 // CHECK24-NEXT:    [[I:%.*]] = alloca i32, align 4
25744 // CHECK24-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
25745 // CHECK24-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
25746 // CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
25747 // CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
25748 // CHECK24-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
25749 // CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
25750 // CHECK24-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
25751 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25752 // CHECK24-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
25753 // CHECK24-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
25754 // CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
25755 // CHECK24-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
25756 // CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
25757 // CHECK24-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
25758 // CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25759 // CHECK24-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
25760 // CHECK24-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
25761 // CHECK24:       cond.true:
25762 // CHECK24-NEXT:    br label [[COND_END:%.*]]
25763 // CHECK24:       cond.false:
25764 // CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
25765 // CHECK24-NEXT:    br label [[COND_END]]
25766 // CHECK24:       cond.end:
25767 // CHECK24-NEXT:    [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
25768 // CHECK24-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
25769 // CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25770 // CHECK24-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
25771 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25772 // CHECK24:       omp.inner.for.cond:
25773 // CHECK24-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
25774 // CHECK24-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
25775 // CHECK24-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
25776 // CHECK24-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25777 // CHECK24:       omp.inner.for.body:
25778 // CHECK24-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
25779 // CHECK24-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
25780 // CHECK24-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
25781 // CHECK24-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !36
25782 // CHECK24-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !36
25783 // CHECK24-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
25784 // CHECK24-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !36
25785 // CHECK24-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !36
25786 // CHECK24-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
25787 // CHECK24-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
25788 // CHECK24-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
25789 // CHECK24-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !36
25790 // CHECK24-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
25791 // CHECK24-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !36
25792 // CHECK24-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
25793 // CHECK24-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !36
25794 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25795 // CHECK24:       omp.body.continue:
25796 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25797 // CHECK24:       omp.inner.for.inc:
25798 // CHECK24-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
25799 // CHECK24-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP12]], 1
25800 // CHECK24-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
25801 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
25802 // CHECK24:       omp.inner.for.end:
25803 // CHECK24-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
25804 // CHECK24:       omp.loop.exit:
25805 // CHECK24-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
25806 // CHECK24-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
25807 // CHECK24-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
25808 // CHECK24-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
25809 // CHECK24:       .omp.final.then:
25810 // CHECK24-NEXT:    store i32 10, i32* [[I]], align 4
25811 // CHECK24-NEXT:    br label [[DOTOMP_FINAL_DONE]]
25812 // CHECK24:       .omp.final.done:
25813 // CHECK24-NEXT:    ret void
25814 //
25815 //
25816 // CHECK25-LABEL: define {{[^@]+}}@_Z3fooi
25817 // CHECK25-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
25818 // CHECK25-NEXT:  entry:
25819 // CHECK25-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25820 // CHECK25-NEXT:    [[A:%.*]] = alloca i32, align 4
25821 // CHECK25-NEXT:    [[AA:%.*]] = alloca i16, align 2
25822 // CHECK25-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
25823 // CHECK25-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
25824 // CHECK25-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
25825 // CHECK25-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
25826 // CHECK25-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
25827 // CHECK25-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
25828 // CHECK25-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25829 // CHECK25-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
25830 // CHECK25-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25831 // CHECK25-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25832 // CHECK25-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25833 // CHECK25-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25834 // CHECK25-NEXT:    [[I:%.*]] = alloca i32, align 4
25835 // CHECK25-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
25836 // CHECK25-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
25837 // CHECK25-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
25838 // CHECK25-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
25839 // CHECK25-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
25840 // CHECK25-NEXT:    [[A8:%.*]] = alloca i32, align 4
25841 // CHECK25-NEXT:    [[A9:%.*]] = alloca i32, align 4
25842 // CHECK25-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
25843 // CHECK25-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
25844 // CHECK25-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
25845 // CHECK25-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
25846 // CHECK25-NEXT:    [[I24:%.*]] = alloca i32, align 4
25847 // CHECK25-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
25848 // CHECK25-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
25849 // CHECK25-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
25850 // CHECK25-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
25851 // CHECK25-NEXT:    [[I40:%.*]] = alloca i32, align 4
25852 // CHECK25-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
25853 // CHECK25-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
25854 // CHECK25-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
25855 // CHECK25-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
25856 // CHECK25-NEXT:    [[I58:%.*]] = alloca i32, align 4
25857 // CHECK25-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25858 // CHECK25-NEXT:    store i32 0, i32* [[A]], align 4
25859 // CHECK25-NEXT:    store i16 0, i16* [[AA]], align 2
25860 // CHECK25-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
25861 // CHECK25-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
25862 // CHECK25-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
25863 // CHECK25-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
25864 // CHECK25-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
25865 // CHECK25-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
25866 // CHECK25-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
25867 // CHECK25-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
25868 // CHECK25-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
25869 // CHECK25-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
25870 // CHECK25-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
25871 // CHECK25-NEXT:    [[TMP6:%.*]] = load i32, i32* [[A]], align 4
25872 // CHECK25-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
25873 // CHECK25-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
25874 // CHECK25-NEXT:    store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_2]], align 4
25875 // CHECK25-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25876 // CHECK25-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
25877 // CHECK25-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25878 // CHECK25-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
25879 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25880 // CHECK25:       omp.inner.for.cond:
25881 // CHECK25-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
25882 // CHECK25-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
25883 // CHECK25-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
25884 // CHECK25-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25885 // CHECK25:       omp.inner.for.body:
25886 // CHECK25-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
25887 // CHECK25-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
25888 // CHECK25-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
25889 // CHECK25-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
25890 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25891 // CHECK25:       omp.body.continue:
25892 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25893 // CHECK25:       omp.inner.for.inc:
25894 // CHECK25-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
25895 // CHECK25-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
25896 // CHECK25-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
25897 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
25898 // CHECK25:       omp.inner.for.end:
25899 // CHECK25-NEXT:    store i32 10, i32* [[I]], align 4
25900 // CHECK25-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
25901 // CHECK25-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
25902 // CHECK25-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
25903 // CHECK25-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV7]], align 4
25904 // CHECK25-NEXT:    [[TMP14:%.*]] = load i32, i32* [[A]], align 4
25905 // CHECK25-NEXT:    store i32 [[TMP14]], i32* [[DOTLINEAR_START]], align 4
25906 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
25907 // CHECK25:       omp.inner.for.cond10:
25908 // CHECK25-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
25909 // CHECK25-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
25910 // CHECK25-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
25911 // CHECK25-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
25912 // CHECK25:       omp.inner.for.body12:
25913 // CHECK25-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
25914 // CHECK25-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 1
25915 // CHECK25-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
25916 // CHECK25-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4
25917 // CHECK25-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A8]], align 4
25918 // CHECK25-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
25919 // CHECK25-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4
25920 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
25921 // CHECK25:       omp.body.continue16:
25922 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
25923 // CHECK25:       omp.inner.for.inc17:
25924 // CHECK25-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
25925 // CHECK25-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
25926 // CHECK25-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
25927 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP7:![0-9]+]]
25928 // CHECK25:       omp.inner.for.end19:
25929 // CHECK25-NEXT:    store i32 10, i32* [[A]], align 4
25930 // CHECK25-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
25931 // CHECK25-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
25932 // CHECK25-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
25933 // CHECK25-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_IV23]], align 4
25934 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
25935 // CHECK25:       omp.inner.for.cond25:
25936 // CHECK25-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
25937 // CHECK25-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !9
25938 // CHECK25-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP21]], [[TMP22]]
25939 // CHECK25-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
25940 // CHECK25:       omp.inner.for.body27:
25941 // CHECK25-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
25942 // CHECK25-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP23]], 1
25943 // CHECK25-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
25944 // CHECK25-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !9
25945 // CHECK25-NEXT:    [[TMP24:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
25946 // CHECK25-NEXT:    [[CONV:%.*]] = sext i16 [[TMP24]] to i32
25947 // CHECK25-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
25948 // CHECK25-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
25949 // CHECK25-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !9
25950 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
25951 // CHECK25:       omp.body.continue32:
25952 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
25953 // CHECK25:       omp.inner.for.inc33:
25954 // CHECK25-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
25955 // CHECK25-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP25]], 1
25956 // CHECK25-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
25957 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP10:![0-9]+]]
25958 // CHECK25:       omp.inner.for.end35:
25959 // CHECK25-NEXT:    store i32 10, i32* [[I24]], align 4
25960 // CHECK25-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
25961 // CHECK25-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
25962 // CHECK25-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
25963 // CHECK25-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV39]], align 4
25964 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
25965 // CHECK25:       omp.inner.for.cond41:
25966 // CHECK25-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
25967 // CHECK25-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !12
25968 // CHECK25-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
25969 // CHECK25-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
25970 // CHECK25:       omp.inner.for.body43:
25971 // CHECK25-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
25972 // CHECK25-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP29]], 1
25973 // CHECK25-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
25974 // CHECK25-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !12
25975 // CHECK25-NEXT:    [[TMP30:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
25976 // CHECK25-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP30]], 1
25977 // CHECK25-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !12
25978 // CHECK25-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
25979 // CHECK25-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP31]] to i32
25980 // CHECK25-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
25981 // CHECK25-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
25982 // CHECK25-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !12
25983 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
25984 // CHECK25:       omp.body.continue50:
25985 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
25986 // CHECK25:       omp.inner.for.inc51:
25987 // CHECK25-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
25988 // CHECK25-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP32]], 1
25989 // CHECK25-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
25990 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP13:![0-9]+]]
25991 // CHECK25:       omp.inner.for.end53:
25992 // CHECK25-NEXT:    store i32 10, i32* [[I40]], align 4
25993 // CHECK25-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
25994 // CHECK25-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
25995 // CHECK25-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
25996 // CHECK25-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV57]], align 4
25997 // CHECK25-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 0
25998 // CHECK25-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
25999 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
26000 // CHECK25:       omp.inner.for.cond59:
26001 // CHECK25-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
26002 // CHECK25-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !15
26003 // CHECK25-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP34]], [[TMP35]]
26004 // CHECK25-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
26005 // CHECK25:       omp.inner.for.body61:
26006 // CHECK25-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
26007 // CHECK25-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP36]], 1
26008 // CHECK25-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
26009 // CHECK25-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !15
26010 // CHECK25-NEXT:    [[TMP37:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
26011 // CHECK25-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP37]], 1
26012 // CHECK25-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !15
26013 // CHECK25-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
26014 // CHECK25-NEXT:    [[TMP38:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
26015 // CHECK25-NEXT:    [[CONV65:%.*]] = fpext float [[TMP38]] to double
26016 // CHECK25-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
26017 // CHECK25-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
26018 // CHECK25-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
26019 // CHECK25-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
26020 // CHECK25-NEXT:    [[TMP39:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !15
26021 // CHECK25-NEXT:    [[CONV69:%.*]] = fpext float [[TMP39]] to double
26022 // CHECK25-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
26023 // CHECK25-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
26024 // CHECK25-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !15
26025 // CHECK25-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
26026 // CHECK25-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i64 0, i64 2
26027 // CHECK25-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !15
26028 // CHECK25-NEXT:    [[ADD74:%.*]] = fadd double [[TMP40]], 1.000000e+00
26029 // CHECK25-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !15
26030 // CHECK25-NEXT:    [[TMP41:%.*]] = mul nsw i64 1, [[TMP4]]
26031 // CHECK25-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP41]]
26032 // CHECK25-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i64 3
26033 // CHECK25-NEXT:    [[TMP42:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !15
26034 // CHECK25-NEXT:    [[ADD77:%.*]] = fadd double [[TMP42]], 1.000000e+00
26035 // CHECK25-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !15
26036 // CHECK25-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
26037 // CHECK25-NEXT:    [[TMP43:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
26038 // CHECK25-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP43]], 1
26039 // CHECK25-NEXT:    store i64 [[ADD78]], i64* [[X]], align 8, !llvm.access.group !15
26040 // CHECK25-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
26041 // CHECK25-NEXT:    [[TMP44:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
26042 // CHECK25-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP44]] to i32
26043 // CHECK25-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
26044 // CHECK25-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
26045 // CHECK25-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 8, !llvm.access.group !15
26046 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
26047 // CHECK25:       omp.body.continue82:
26048 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
26049 // CHECK25:       omp.inner.for.inc83:
26050 // CHECK25-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
26051 // CHECK25-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP45]], 1
26052 // CHECK25-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
26053 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP16:![0-9]+]]
26054 // CHECK25:       omp.inner.for.end85:
26055 // CHECK25-NEXT:    store i32 10, i32* [[I58]], align 4
26056 // CHECK25-NEXT:    [[TMP46:%.*]] = load i32, i32* [[A]], align 4
26057 // CHECK25-NEXT:    [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
26058 // CHECK25-NEXT:    call void @llvm.stackrestore(i8* [[TMP47]])
26059 // CHECK25-NEXT:    ret i32 [[TMP46]]
26060 //
26061 //
26062 // CHECK25-LABEL: define {{[^@]+}}@_Z3bari
26063 // CHECK25-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
26064 // CHECK25-NEXT:  entry:
26065 // CHECK25-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26066 // CHECK25-NEXT:    [[A:%.*]] = alloca i32, align 4
26067 // CHECK25-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
26068 // CHECK25-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26069 // CHECK25-NEXT:    store i32 0, i32* [[A]], align 4
26070 // CHECK25-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
26071 // CHECK25-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
26072 // CHECK25-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
26073 // CHECK25-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
26074 // CHECK25-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
26075 // CHECK25-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
26076 // CHECK25-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef signext [[TMP2]])
26077 // CHECK25-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
26078 // CHECK25-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
26079 // CHECK25-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
26080 // CHECK25-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
26081 // CHECK25-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
26082 // CHECK25-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
26083 // CHECK25-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
26084 // CHECK25-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
26085 // CHECK25-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
26086 // CHECK25-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
26087 // CHECK25-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
26088 // CHECK25-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
26089 // CHECK25-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
26090 // CHECK25-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
26091 // CHECK25-NEXT:    ret i32 [[TMP8]]
26092 //
26093 //
26094 // CHECK25-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
26095 // CHECK25-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
26096 // CHECK25-NEXT:  entry:
26097 // CHECK25-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
26098 // CHECK25-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26099 // CHECK25-NEXT:    [[B:%.*]] = alloca i32, align 4
26100 // CHECK25-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
26101 // CHECK25-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
26102 // CHECK25-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26103 // CHECK25-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26104 // CHECK25-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26105 // CHECK25-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26106 // CHECK25-NEXT:    [[I:%.*]] = alloca i32, align 4
26107 // CHECK25-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
26108 // CHECK25-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26109 // CHECK25-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
26110 // CHECK25-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
26111 // CHECK25-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
26112 // CHECK25-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
26113 // CHECK25-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
26114 // CHECK25-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
26115 // CHECK25-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
26116 // CHECK25-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
26117 // CHECK25-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
26118 // CHECK25-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
26119 // CHECK25-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
26120 // CHECK25-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26121 // CHECK25-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
26122 // CHECK25-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26123 // CHECK25-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
26124 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26125 // CHECK25:       omp.inner.for.cond:
26126 // CHECK25-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
26127 // CHECK25-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
26128 // CHECK25-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
26129 // CHECK25-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26130 // CHECK25:       omp.inner.for.body:
26131 // CHECK25-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
26132 // CHECK25-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
26133 // CHECK25-NEXT:    [[ADD2:%.*]] = add nsw i32 0, [[MUL]]
26134 // CHECK25-NEXT:    store i32 [[ADD2]], i32* [[I]], align 4, !llvm.access.group !18
26135 // CHECK25-NEXT:    [[TMP9:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
26136 // CHECK25-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP9]] to double
26137 // CHECK25-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
26138 // CHECK25-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
26139 // CHECK25-NEXT:    store double [[ADD3]], double* [[A]], align 8, !llvm.access.group !18
26140 // CHECK25-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
26141 // CHECK25-NEXT:    [[TMP10:%.*]] = load double, double* [[A4]], align 8, !llvm.access.group !18
26142 // CHECK25-NEXT:    [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00
26143 // CHECK25-NEXT:    store double [[INC]], double* [[A4]], align 8, !llvm.access.group !18
26144 // CHECK25-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
26145 // CHECK25-NEXT:    [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]]
26146 // CHECK25-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP11]]
26147 // CHECK25-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
26148 // CHECK25-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !18
26149 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26150 // CHECK25:       omp.body.continue:
26151 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26152 // CHECK25:       omp.inner.for.inc:
26153 // CHECK25-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
26154 // CHECK25-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP12]], 1
26155 // CHECK25-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
26156 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
26157 // CHECK25:       omp.inner.for.end:
26158 // CHECK25-NEXT:    store i32 10, i32* [[I]], align 4
26159 // CHECK25-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
26160 // CHECK25-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
26161 // CHECK25-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX8]], i64 1
26162 // CHECK25-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX9]], align 2
26163 // CHECK25-NEXT:    [[CONV10:%.*]] = sext i16 [[TMP14]] to i32
26164 // CHECK25-NEXT:    [[TMP15:%.*]] = load i32, i32* [[B]], align 4
26165 // CHECK25-NEXT:    [[ADD11:%.*]] = add nsw i32 [[CONV10]], [[TMP15]]
26166 // CHECK25-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
26167 // CHECK25-NEXT:    call void @llvm.stackrestore(i8* [[TMP16]])
26168 // CHECK25-NEXT:    ret i32 [[ADD11]]
26169 //
26170 //
26171 // CHECK25-LABEL: define {{[^@]+}}@_ZL7fstatici
26172 // CHECK25-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
26173 // CHECK25-NEXT:  entry:
26174 // CHECK25-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26175 // CHECK25-NEXT:    [[A:%.*]] = alloca i32, align 4
26176 // CHECK25-NEXT:    [[AA:%.*]] = alloca i16, align 2
26177 // CHECK25-NEXT:    [[AAA:%.*]] = alloca i8, align 1
26178 // CHECK25-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
26179 // CHECK25-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26180 // CHECK25-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26181 // CHECK25-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26182 // CHECK25-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
26183 // CHECK25-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26184 // CHECK25-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26185 // CHECK25-NEXT:    [[I:%.*]] = alloca i32, align 4
26186 // CHECK25-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26187 // CHECK25-NEXT:    [[I5:%.*]] = alloca i32, align 4
26188 // CHECK25-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26189 // CHECK25-NEXT:    store i32 0, i32* [[A]], align 4
26190 // CHECK25-NEXT:    store i16 0, i16* [[AA]], align 2
26191 // CHECK25-NEXT:    store i8 0, i8* [[AAA]], align 1
26192 // CHECK25-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
26193 // CHECK25-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
26194 // CHECK25-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
26195 // CHECK25-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26196 // CHECK25-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26197 // CHECK25-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26198 // CHECK25-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
26199 // CHECK25-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
26200 // CHECK25-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
26201 // CHECK25-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
26202 // CHECK25-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
26203 // CHECK25-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
26204 // CHECK25-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26205 // CHECK25-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
26206 // CHECK25-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
26207 // CHECK25-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26208 // CHECK25-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
26209 // CHECK25-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26210 // CHECK25-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26211 // CHECK25-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
26212 // CHECK25-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
26213 // CHECK25:       simd.if.then:
26214 // CHECK25-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26215 // CHECK25-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
26216 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26217 // CHECK25:       omp.inner.for.cond:
26218 // CHECK25-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
26219 // CHECK25-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
26220 // CHECK25-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
26221 // CHECK25-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
26222 // CHECK25-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26223 // CHECK25:       omp.inner.for.body:
26224 // CHECK25-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !21
26225 // CHECK25-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
26226 // CHECK25-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
26227 // CHECK25-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
26228 // CHECK25-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !21
26229 // CHECK25-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !21
26230 // CHECK25-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
26231 // CHECK25-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !21
26232 // CHECK25-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !21
26233 // CHECK25-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
26234 // CHECK25-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
26235 // CHECK25-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
26236 // CHECK25-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !21
26237 // CHECK25-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !21
26238 // CHECK25-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
26239 // CHECK25-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
26240 // CHECK25-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
26241 // CHECK25-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !21
26242 // CHECK25-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
26243 // CHECK25-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
26244 // CHECK25-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
26245 // CHECK25-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
26246 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26247 // CHECK25:       omp.body.continue:
26248 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26249 // CHECK25:       omp.inner.for.inc:
26250 // CHECK25-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
26251 // CHECK25-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
26252 // CHECK25-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
26253 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
26254 // CHECK25:       omp.inner.for.end:
26255 // CHECK25-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26256 // CHECK25-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26257 // CHECK25-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26258 // CHECK25-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
26259 // CHECK25-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
26260 // CHECK25-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
26261 // CHECK25-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
26262 // CHECK25-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
26263 // CHECK25-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
26264 // CHECK25-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
26265 // CHECK25-NEXT:    br label [[SIMD_IF_END]]
26266 // CHECK25:       simd.if.end:
26267 // CHECK25-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
26268 // CHECK25-NEXT:    ret i32 [[TMP21]]
26269 //
26270 //
26271 // CHECK25-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
26272 // CHECK25-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
26273 // CHECK25-NEXT:  entry:
26274 // CHECK25-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26275 // CHECK25-NEXT:    [[A:%.*]] = alloca i32, align 4
26276 // CHECK25-NEXT:    [[AA:%.*]] = alloca i16, align 2
26277 // CHECK25-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
26278 // CHECK25-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26279 // CHECK25-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26280 // CHECK25-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26281 // CHECK25-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26282 // CHECK25-NEXT:    [[I:%.*]] = alloca i32, align 4
26283 // CHECK25-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26284 // CHECK25-NEXT:    store i32 0, i32* [[A]], align 4
26285 // CHECK25-NEXT:    store i16 0, i16* [[AA]], align 2
26286 // CHECK25-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26287 // CHECK25-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
26288 // CHECK25-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26289 // CHECK25-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
26290 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26291 // CHECK25:       omp.inner.for.cond:
26292 // CHECK25-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
26293 // CHECK25-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
26294 // CHECK25-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
26295 // CHECK25-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26296 // CHECK25:       omp.inner.for.body:
26297 // CHECK25-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
26298 // CHECK25-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
26299 // CHECK25-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26300 // CHECK25-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24
26301 // CHECK25-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24
26302 // CHECK25-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
26303 // CHECK25-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !24
26304 // CHECK25-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24
26305 // CHECK25-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
26306 // CHECK25-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
26307 // CHECK25-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
26308 // CHECK25-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !24
26309 // CHECK25-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
26310 // CHECK25-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
26311 // CHECK25-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
26312 // CHECK25-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
26313 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26314 // CHECK25:       omp.body.continue:
26315 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26316 // CHECK25:       omp.inner.for.inc:
26317 // CHECK25-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
26318 // CHECK25-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
26319 // CHECK25-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
26320 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
26321 // CHECK25:       omp.inner.for.end:
26322 // CHECK25-NEXT:    store i32 10, i32* [[I]], align 4
26323 // CHECK25-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
26324 // CHECK25-NEXT:    ret i32 [[TMP8]]
26325 //
26326 //
26327 // CHECK26-LABEL: define {{[^@]+}}@_Z3fooi
26328 // CHECK26-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
26329 // CHECK26-NEXT:  entry:
26330 // CHECK26-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26331 // CHECK26-NEXT:    [[A:%.*]] = alloca i32, align 4
26332 // CHECK26-NEXT:    [[AA:%.*]] = alloca i16, align 2
26333 // CHECK26-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
26334 // CHECK26-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
26335 // CHECK26-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
26336 // CHECK26-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
26337 // CHECK26-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
26338 // CHECK26-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
26339 // CHECK26-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26340 // CHECK26-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
26341 // CHECK26-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26342 // CHECK26-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26343 // CHECK26-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26344 // CHECK26-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26345 // CHECK26-NEXT:    [[I:%.*]] = alloca i32, align 4
26346 // CHECK26-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
26347 // CHECK26-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
26348 // CHECK26-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
26349 // CHECK26-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
26350 // CHECK26-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
26351 // CHECK26-NEXT:    [[A8:%.*]] = alloca i32, align 4
26352 // CHECK26-NEXT:    [[A9:%.*]] = alloca i32, align 4
26353 // CHECK26-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
26354 // CHECK26-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
26355 // CHECK26-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
26356 // CHECK26-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
26357 // CHECK26-NEXT:    [[I24:%.*]] = alloca i32, align 4
26358 // CHECK26-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
26359 // CHECK26-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
26360 // CHECK26-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
26361 // CHECK26-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
26362 // CHECK26-NEXT:    [[I40:%.*]] = alloca i32, align 4
26363 // CHECK26-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
26364 // CHECK26-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
26365 // CHECK26-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
26366 // CHECK26-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
26367 // CHECK26-NEXT:    [[I58:%.*]] = alloca i32, align 4
26368 // CHECK26-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26369 // CHECK26-NEXT:    store i32 0, i32* [[A]], align 4
26370 // CHECK26-NEXT:    store i16 0, i16* [[AA]], align 2
26371 // CHECK26-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
26372 // CHECK26-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
26373 // CHECK26-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
26374 // CHECK26-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
26375 // CHECK26-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
26376 // CHECK26-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
26377 // CHECK26-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
26378 // CHECK26-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
26379 // CHECK26-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
26380 // CHECK26-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
26381 // CHECK26-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
26382 // CHECK26-NEXT:    [[TMP6:%.*]] = load i32, i32* [[A]], align 4
26383 // CHECK26-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
26384 // CHECK26-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
26385 // CHECK26-NEXT:    store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_2]], align 4
26386 // CHECK26-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26387 // CHECK26-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
26388 // CHECK26-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26389 // CHECK26-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
26390 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26391 // CHECK26:       omp.inner.for.cond:
26392 // CHECK26-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
26393 // CHECK26-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
26394 // CHECK26-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
26395 // CHECK26-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26396 // CHECK26:       omp.inner.for.body:
26397 // CHECK26-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
26398 // CHECK26-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
26399 // CHECK26-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26400 // CHECK26-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
26401 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26402 // CHECK26:       omp.body.continue:
26403 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26404 // CHECK26:       omp.inner.for.inc:
26405 // CHECK26-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
26406 // CHECK26-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
26407 // CHECK26-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
26408 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
26409 // CHECK26:       omp.inner.for.end:
26410 // CHECK26-NEXT:    store i32 10, i32* [[I]], align 4
26411 // CHECK26-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
26412 // CHECK26-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
26413 // CHECK26-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
26414 // CHECK26-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV7]], align 4
26415 // CHECK26-NEXT:    [[TMP14:%.*]] = load i32, i32* [[A]], align 4
26416 // CHECK26-NEXT:    store i32 [[TMP14]], i32* [[DOTLINEAR_START]], align 4
26417 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
26418 // CHECK26:       omp.inner.for.cond10:
26419 // CHECK26-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
26420 // CHECK26-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
26421 // CHECK26-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
26422 // CHECK26-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
26423 // CHECK26:       omp.inner.for.body12:
26424 // CHECK26-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
26425 // CHECK26-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 1
26426 // CHECK26-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
26427 // CHECK26-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4
26428 // CHECK26-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A8]], align 4
26429 // CHECK26-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
26430 // CHECK26-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4
26431 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
26432 // CHECK26:       omp.body.continue16:
26433 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
26434 // CHECK26:       omp.inner.for.inc17:
26435 // CHECK26-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
26436 // CHECK26-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
26437 // CHECK26-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
26438 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP7:![0-9]+]]
26439 // CHECK26:       omp.inner.for.end19:
26440 // CHECK26-NEXT:    store i32 10, i32* [[A]], align 4
26441 // CHECK26-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
26442 // CHECK26-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
26443 // CHECK26-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
26444 // CHECK26-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_IV23]], align 4
26445 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
26446 // CHECK26:       omp.inner.for.cond25:
26447 // CHECK26-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
26448 // CHECK26-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !9
26449 // CHECK26-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP21]], [[TMP22]]
26450 // CHECK26-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
26451 // CHECK26:       omp.inner.for.body27:
26452 // CHECK26-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
26453 // CHECK26-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP23]], 1
26454 // CHECK26-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
26455 // CHECK26-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !9
26456 // CHECK26-NEXT:    [[TMP24:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
26457 // CHECK26-NEXT:    [[CONV:%.*]] = sext i16 [[TMP24]] to i32
26458 // CHECK26-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
26459 // CHECK26-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
26460 // CHECK26-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !9
26461 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
26462 // CHECK26:       omp.body.continue32:
26463 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
26464 // CHECK26:       omp.inner.for.inc33:
26465 // CHECK26-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
26466 // CHECK26-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP25]], 1
26467 // CHECK26-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9
26468 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP10:![0-9]+]]
26469 // CHECK26:       omp.inner.for.end35:
26470 // CHECK26-NEXT:    store i32 10, i32* [[I24]], align 4
26471 // CHECK26-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
26472 // CHECK26-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
26473 // CHECK26-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
26474 // CHECK26-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV39]], align 4
26475 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
26476 // CHECK26:       omp.inner.for.cond41:
26477 // CHECK26-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
26478 // CHECK26-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !12
26479 // CHECK26-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
26480 // CHECK26-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
26481 // CHECK26:       omp.inner.for.body43:
26482 // CHECK26-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
26483 // CHECK26-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP29]], 1
26484 // CHECK26-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
26485 // CHECK26-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !12
26486 // CHECK26-NEXT:    [[TMP30:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
26487 // CHECK26-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP30]], 1
26488 // CHECK26-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !12
26489 // CHECK26-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
26490 // CHECK26-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP31]] to i32
26491 // CHECK26-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
26492 // CHECK26-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
26493 // CHECK26-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !12
26494 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
26495 // CHECK26:       omp.body.continue50:
26496 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
26497 // CHECK26:       omp.inner.for.inc51:
26498 // CHECK26-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
26499 // CHECK26-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP32]], 1
26500 // CHECK26-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !12
26501 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP13:![0-9]+]]
26502 // CHECK26:       omp.inner.for.end53:
26503 // CHECK26-NEXT:    store i32 10, i32* [[I40]], align 4
26504 // CHECK26-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
26505 // CHECK26-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
26506 // CHECK26-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
26507 // CHECK26-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV57]], align 4
26508 // CHECK26-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 0
26509 // CHECK26-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
26510 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
26511 // CHECK26:       omp.inner.for.cond59:
26512 // CHECK26-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
26513 // CHECK26-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !15
26514 // CHECK26-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP34]], [[TMP35]]
26515 // CHECK26-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
26516 // CHECK26:       omp.inner.for.body61:
26517 // CHECK26-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
26518 // CHECK26-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP36]], 1
26519 // CHECK26-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
26520 // CHECK26-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !15
26521 // CHECK26-NEXT:    [[TMP37:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
26522 // CHECK26-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP37]], 1
26523 // CHECK26-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !15
26524 // CHECK26-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
26525 // CHECK26-NEXT:    [[TMP38:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
26526 // CHECK26-NEXT:    [[CONV65:%.*]] = fpext float [[TMP38]] to double
26527 // CHECK26-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
26528 // CHECK26-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
26529 // CHECK26-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
26530 // CHECK26-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
26531 // CHECK26-NEXT:    [[TMP39:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !15
26532 // CHECK26-NEXT:    [[CONV69:%.*]] = fpext float [[TMP39]] to double
26533 // CHECK26-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
26534 // CHECK26-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
26535 // CHECK26-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !15
26536 // CHECK26-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
26537 // CHECK26-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i64 0, i64 2
26538 // CHECK26-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !15
26539 // CHECK26-NEXT:    [[ADD74:%.*]] = fadd double [[TMP40]], 1.000000e+00
26540 // CHECK26-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !15
26541 // CHECK26-NEXT:    [[TMP41:%.*]] = mul nsw i64 1, [[TMP4]]
26542 // CHECK26-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP41]]
26543 // CHECK26-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i64 3
26544 // CHECK26-NEXT:    [[TMP42:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !15
26545 // CHECK26-NEXT:    [[ADD77:%.*]] = fadd double [[TMP42]], 1.000000e+00
26546 // CHECK26-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !15
26547 // CHECK26-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
26548 // CHECK26-NEXT:    [[TMP43:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
26549 // CHECK26-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP43]], 1
26550 // CHECK26-NEXT:    store i64 [[ADD78]], i64* [[X]], align 8, !llvm.access.group !15
26551 // CHECK26-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
26552 // CHECK26-NEXT:    [[TMP44:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
26553 // CHECK26-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP44]] to i32
26554 // CHECK26-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
26555 // CHECK26-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
26556 // CHECK26-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 8, !llvm.access.group !15
26557 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
26558 // CHECK26:       omp.body.continue82:
26559 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
26560 // CHECK26:       omp.inner.for.inc83:
26561 // CHECK26-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
26562 // CHECK26-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP45]], 1
26563 // CHECK26-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !15
26564 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP16:![0-9]+]]
26565 // CHECK26:       omp.inner.for.end85:
26566 // CHECK26-NEXT:    store i32 10, i32* [[I58]], align 4
26567 // CHECK26-NEXT:    [[TMP46:%.*]] = load i32, i32* [[A]], align 4
26568 // CHECK26-NEXT:    [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
26569 // CHECK26-NEXT:    call void @llvm.stackrestore(i8* [[TMP47]])
26570 // CHECK26-NEXT:    ret i32 [[TMP46]]
26571 //
26572 //
26573 // CHECK26-LABEL: define {{[^@]+}}@_Z3bari
26574 // CHECK26-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
26575 // CHECK26-NEXT:  entry:
26576 // CHECK26-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26577 // CHECK26-NEXT:    [[A:%.*]] = alloca i32, align 4
26578 // CHECK26-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
26579 // CHECK26-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26580 // CHECK26-NEXT:    store i32 0, i32* [[A]], align 4
26581 // CHECK26-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
26582 // CHECK26-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
26583 // CHECK26-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
26584 // CHECK26-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
26585 // CHECK26-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
26586 // CHECK26-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
26587 // CHECK26-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef signext [[TMP2]])
26588 // CHECK26-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
26589 // CHECK26-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
26590 // CHECK26-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
26591 // CHECK26-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
26592 // CHECK26-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
26593 // CHECK26-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
26594 // CHECK26-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
26595 // CHECK26-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
26596 // CHECK26-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
26597 // CHECK26-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
26598 // CHECK26-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
26599 // CHECK26-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
26600 // CHECK26-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
26601 // CHECK26-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
26602 // CHECK26-NEXT:    ret i32 [[TMP8]]
26603 //
26604 //
26605 // CHECK26-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
26606 // CHECK26-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
26607 // CHECK26-NEXT:  entry:
26608 // CHECK26-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
26609 // CHECK26-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26610 // CHECK26-NEXT:    [[B:%.*]] = alloca i32, align 4
26611 // CHECK26-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
26612 // CHECK26-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
26613 // CHECK26-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26614 // CHECK26-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26615 // CHECK26-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26616 // CHECK26-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26617 // CHECK26-NEXT:    [[I:%.*]] = alloca i32, align 4
26618 // CHECK26-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
26619 // CHECK26-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26620 // CHECK26-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
26621 // CHECK26-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
26622 // CHECK26-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
26623 // CHECK26-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
26624 // CHECK26-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
26625 // CHECK26-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
26626 // CHECK26-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
26627 // CHECK26-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
26628 // CHECK26-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
26629 // CHECK26-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
26630 // CHECK26-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
26631 // CHECK26-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26632 // CHECK26-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
26633 // CHECK26-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26634 // CHECK26-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
26635 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26636 // CHECK26:       omp.inner.for.cond:
26637 // CHECK26-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
26638 // CHECK26-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
26639 // CHECK26-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
26640 // CHECK26-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26641 // CHECK26:       omp.inner.for.body:
26642 // CHECK26-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
26643 // CHECK26-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
26644 // CHECK26-NEXT:    [[ADD2:%.*]] = add nsw i32 0, [[MUL]]
26645 // CHECK26-NEXT:    store i32 [[ADD2]], i32* [[I]], align 4, !llvm.access.group !18
26646 // CHECK26-NEXT:    [[TMP9:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
26647 // CHECK26-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP9]] to double
26648 // CHECK26-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
26649 // CHECK26-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
26650 // CHECK26-NEXT:    store double [[ADD3]], double* [[A]], align 8, !llvm.access.group !18
26651 // CHECK26-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
26652 // CHECK26-NEXT:    [[TMP10:%.*]] = load double, double* [[A4]], align 8, !llvm.access.group !18
26653 // CHECK26-NEXT:    [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00
26654 // CHECK26-NEXT:    store double [[INC]], double* [[A4]], align 8, !llvm.access.group !18
26655 // CHECK26-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
26656 // CHECK26-NEXT:    [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]]
26657 // CHECK26-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP11]]
26658 // CHECK26-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
26659 // CHECK26-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !18
26660 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26661 // CHECK26:       omp.body.continue:
26662 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26663 // CHECK26:       omp.inner.for.inc:
26664 // CHECK26-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
26665 // CHECK26-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP12]], 1
26666 // CHECK26-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
26667 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
26668 // CHECK26:       omp.inner.for.end:
26669 // CHECK26-NEXT:    store i32 10, i32* [[I]], align 4
26670 // CHECK26-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
26671 // CHECK26-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
26672 // CHECK26-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX8]], i64 1
26673 // CHECK26-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX9]], align 2
26674 // CHECK26-NEXT:    [[CONV10:%.*]] = sext i16 [[TMP14]] to i32
26675 // CHECK26-NEXT:    [[TMP15:%.*]] = load i32, i32* [[B]], align 4
26676 // CHECK26-NEXT:    [[ADD11:%.*]] = add nsw i32 [[CONV10]], [[TMP15]]
26677 // CHECK26-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
26678 // CHECK26-NEXT:    call void @llvm.stackrestore(i8* [[TMP16]])
26679 // CHECK26-NEXT:    ret i32 [[ADD11]]
26680 //
26681 //
26682 // CHECK26-LABEL: define {{[^@]+}}@_ZL7fstatici
26683 // CHECK26-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
26684 // CHECK26-NEXT:  entry:
26685 // CHECK26-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26686 // CHECK26-NEXT:    [[A:%.*]] = alloca i32, align 4
26687 // CHECK26-NEXT:    [[AA:%.*]] = alloca i16, align 2
26688 // CHECK26-NEXT:    [[AAA:%.*]] = alloca i8, align 1
26689 // CHECK26-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
26690 // CHECK26-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26691 // CHECK26-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26692 // CHECK26-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
26693 // CHECK26-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
26694 // CHECK26-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26695 // CHECK26-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26696 // CHECK26-NEXT:    [[I:%.*]] = alloca i32, align 4
26697 // CHECK26-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26698 // CHECK26-NEXT:    [[I5:%.*]] = alloca i32, align 4
26699 // CHECK26-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26700 // CHECK26-NEXT:    store i32 0, i32* [[A]], align 4
26701 // CHECK26-NEXT:    store i16 0, i16* [[AA]], align 2
26702 // CHECK26-NEXT:    store i8 0, i8* [[AAA]], align 1
26703 // CHECK26-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
26704 // CHECK26-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
26705 // CHECK26-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
26706 // CHECK26-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
26707 // CHECK26-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26708 // CHECK26-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26709 // CHECK26-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
26710 // CHECK26-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
26711 // CHECK26-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
26712 // CHECK26-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
26713 // CHECK26-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
26714 // CHECK26-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
26715 // CHECK26-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26716 // CHECK26-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
26717 // CHECK26-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
26718 // CHECK26-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26719 // CHECK26-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
26720 // CHECK26-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26721 // CHECK26-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26722 // CHECK26-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
26723 // CHECK26-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
26724 // CHECK26:       simd.if.then:
26725 // CHECK26-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26726 // CHECK26-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
26727 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26728 // CHECK26:       omp.inner.for.cond:
26729 // CHECK26-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
26730 // CHECK26-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
26731 // CHECK26-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
26732 // CHECK26-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
26733 // CHECK26-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26734 // CHECK26:       omp.inner.for.body:
26735 // CHECK26-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !21
26736 // CHECK26-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
26737 // CHECK26-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
26738 // CHECK26-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
26739 // CHECK26-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !21
26740 // CHECK26-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !21
26741 // CHECK26-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
26742 // CHECK26-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !21
26743 // CHECK26-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !21
26744 // CHECK26-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
26745 // CHECK26-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
26746 // CHECK26-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
26747 // CHECK26-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !21
26748 // CHECK26-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !21
26749 // CHECK26-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
26750 // CHECK26-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
26751 // CHECK26-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
26752 // CHECK26-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !21
26753 // CHECK26-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
26754 // CHECK26-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
26755 // CHECK26-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
26756 // CHECK26-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
26757 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26758 // CHECK26:       omp.body.continue:
26759 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26760 // CHECK26:       omp.inner.for.inc:
26761 // CHECK26-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
26762 // CHECK26-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
26763 // CHECK26-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
26764 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
26765 // CHECK26:       omp.inner.for.end:
26766 // CHECK26-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26767 // CHECK26-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
26768 // CHECK26-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
26769 // CHECK26-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
26770 // CHECK26-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
26771 // CHECK26-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
26772 // CHECK26-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
26773 // CHECK26-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
26774 // CHECK26-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
26775 // CHECK26-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
26776 // CHECK26-NEXT:    br label [[SIMD_IF_END]]
26777 // CHECK26:       simd.if.end:
26778 // CHECK26-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
26779 // CHECK26-NEXT:    ret i32 [[TMP21]]
26780 //
26781 //
26782 // CHECK26-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
26783 // CHECK26-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
26784 // CHECK26-NEXT:  entry:
26785 // CHECK26-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26786 // CHECK26-NEXT:    [[A:%.*]] = alloca i32, align 4
26787 // CHECK26-NEXT:    [[AA:%.*]] = alloca i16, align 2
26788 // CHECK26-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
26789 // CHECK26-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26790 // CHECK26-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26791 // CHECK26-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26792 // CHECK26-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26793 // CHECK26-NEXT:    [[I:%.*]] = alloca i32, align 4
26794 // CHECK26-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26795 // CHECK26-NEXT:    store i32 0, i32* [[A]], align 4
26796 // CHECK26-NEXT:    store i16 0, i16* [[AA]], align 2
26797 // CHECK26-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26798 // CHECK26-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
26799 // CHECK26-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26800 // CHECK26-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
26801 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26802 // CHECK26:       omp.inner.for.cond:
26803 // CHECK26-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
26804 // CHECK26-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
26805 // CHECK26-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
26806 // CHECK26-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26807 // CHECK26:       omp.inner.for.body:
26808 // CHECK26-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
26809 // CHECK26-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
26810 // CHECK26-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26811 // CHECK26-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24
26812 // CHECK26-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24
26813 // CHECK26-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
26814 // CHECK26-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !24
26815 // CHECK26-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24
26816 // CHECK26-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
26817 // CHECK26-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
26818 // CHECK26-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
26819 // CHECK26-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !24
26820 // CHECK26-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
26821 // CHECK26-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
26822 // CHECK26-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
26823 // CHECK26-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
26824 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26825 // CHECK26:       omp.body.continue:
26826 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26827 // CHECK26:       omp.inner.for.inc:
26828 // CHECK26-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
26829 // CHECK26-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
26830 // CHECK26-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
26831 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
26832 // CHECK26:       omp.inner.for.end:
26833 // CHECK26-NEXT:    store i32 10, i32* [[I]], align 4
26834 // CHECK26-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
26835 // CHECK26-NEXT:    ret i32 [[TMP8]]
26836 //
26837 //
26838 // CHECK27-LABEL: define {{[^@]+}}@_Z3fooi
26839 // CHECK27-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
26840 // CHECK27-NEXT:  entry:
26841 // CHECK27-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26842 // CHECK27-NEXT:    [[A:%.*]] = alloca i32, align 4
26843 // CHECK27-NEXT:    [[AA:%.*]] = alloca i16, align 2
26844 // CHECK27-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
26845 // CHECK27-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
26846 // CHECK27-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
26847 // CHECK27-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
26848 // CHECK27-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
26849 // CHECK27-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
26850 // CHECK27-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26851 // CHECK27-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
26852 // CHECK27-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26853 // CHECK27-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26854 // CHECK27-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26855 // CHECK27-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26856 // CHECK27-NEXT:    [[I:%.*]] = alloca i32, align 4
26857 // CHECK27-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
26858 // CHECK27-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
26859 // CHECK27-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
26860 // CHECK27-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
26861 // CHECK27-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
26862 // CHECK27-NEXT:    [[A8:%.*]] = alloca i32, align 4
26863 // CHECK27-NEXT:    [[A9:%.*]] = alloca i32, align 4
26864 // CHECK27-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
26865 // CHECK27-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
26866 // CHECK27-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
26867 // CHECK27-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
26868 // CHECK27-NEXT:    [[I24:%.*]] = alloca i32, align 4
26869 // CHECK27-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
26870 // CHECK27-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
26871 // CHECK27-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
26872 // CHECK27-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
26873 // CHECK27-NEXT:    [[I40:%.*]] = alloca i32, align 4
26874 // CHECK27-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
26875 // CHECK27-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
26876 // CHECK27-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
26877 // CHECK27-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
26878 // CHECK27-NEXT:    [[I58:%.*]] = alloca i32, align 4
26879 // CHECK27-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26880 // CHECK27-NEXT:    store i32 0, i32* [[A]], align 4
26881 // CHECK27-NEXT:    store i16 0, i16* [[AA]], align 2
26882 // CHECK27-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
26883 // CHECK27-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
26884 // CHECK27-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
26885 // CHECK27-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
26886 // CHECK27-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
26887 // CHECK27-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
26888 // CHECK27-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
26889 // CHECK27-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
26890 // CHECK27-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
26891 // CHECK27-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4
26892 // CHECK27-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
26893 // CHECK27-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
26894 // CHECK27-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_2]], align 4
26895 // CHECK27-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26896 // CHECK27-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
26897 // CHECK27-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26898 // CHECK27-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
26899 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26900 // CHECK27:       omp.inner.for.cond:
26901 // CHECK27-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
26902 // CHECK27-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
26903 // CHECK27-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
26904 // CHECK27-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26905 // CHECK27:       omp.inner.for.body:
26906 // CHECK27-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
26907 // CHECK27-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
26908 // CHECK27-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
26909 // CHECK27-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
26910 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26911 // CHECK27:       omp.body.continue:
26912 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26913 // CHECK27:       omp.inner.for.inc:
26914 // CHECK27-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
26915 // CHECK27-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
26916 // CHECK27-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
26917 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
26918 // CHECK27:       omp.inner.for.end:
26919 // CHECK27-NEXT:    store i32 10, i32* [[I]], align 4
26920 // CHECK27-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
26921 // CHECK27-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
26922 // CHECK27-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
26923 // CHECK27-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV7]], align 4
26924 // CHECK27-NEXT:    [[TMP12:%.*]] = load i32, i32* [[A]], align 4
26925 // CHECK27-NEXT:    store i32 [[TMP12]], i32* [[DOTLINEAR_START]], align 4
26926 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
26927 // CHECK27:       omp.inner.for.cond10:
26928 // CHECK27-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
26929 // CHECK27-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
26930 // CHECK27-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
26931 // CHECK27-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
26932 // CHECK27:       omp.inner.for.body12:
26933 // CHECK27-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
26934 // CHECK27-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 1
26935 // CHECK27-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
26936 // CHECK27-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4
26937 // CHECK27-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A8]], align 4
26938 // CHECK27-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
26939 // CHECK27-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4
26940 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
26941 // CHECK27:       omp.body.continue16:
26942 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
26943 // CHECK27:       omp.inner.for.inc17:
26944 // CHECK27-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
26945 // CHECK27-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
26946 // CHECK27-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
26947 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP8:![0-9]+]]
26948 // CHECK27:       omp.inner.for.end19:
26949 // CHECK27-NEXT:    store i32 10, i32* [[A]], align 4
26950 // CHECK27-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
26951 // CHECK27-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
26952 // CHECK27-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
26953 // CHECK27-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV23]], align 4
26954 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
26955 // CHECK27:       omp.inner.for.cond25:
26956 // CHECK27-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
26957 // CHECK27-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !10
26958 // CHECK27-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
26959 // CHECK27-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
26960 // CHECK27:       omp.inner.for.body27:
26961 // CHECK27-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
26962 // CHECK27-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP21]], 1
26963 // CHECK27-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
26964 // CHECK27-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !10
26965 // CHECK27-NEXT:    [[TMP22:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
26966 // CHECK27-NEXT:    [[CONV:%.*]] = sext i16 [[TMP22]] to i32
26967 // CHECK27-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
26968 // CHECK27-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
26969 // CHECK27-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !10
26970 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
26971 // CHECK27:       omp.body.continue32:
26972 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
26973 // CHECK27:       omp.inner.for.inc33:
26974 // CHECK27-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
26975 // CHECK27-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP23]], 1
26976 // CHECK27-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
26977 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP11:![0-9]+]]
26978 // CHECK27:       omp.inner.for.end35:
26979 // CHECK27-NEXT:    store i32 10, i32* [[I24]], align 4
26980 // CHECK27-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
26981 // CHECK27-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
26982 // CHECK27-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
26983 // CHECK27-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV39]], align 4
26984 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
26985 // CHECK27:       omp.inner.for.cond41:
26986 // CHECK27-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
26987 // CHECK27-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !13
26988 // CHECK27-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
26989 // CHECK27-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
26990 // CHECK27:       omp.inner.for.body43:
26991 // CHECK27-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
26992 // CHECK27-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP27]], 1
26993 // CHECK27-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
26994 // CHECK27-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !13
26995 // CHECK27-NEXT:    [[TMP28:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
26996 // CHECK27-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP28]], 1
26997 // CHECK27-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !13
26998 // CHECK27-NEXT:    [[TMP29:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
26999 // CHECK27-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP29]] to i32
27000 // CHECK27-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
27001 // CHECK27-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
27002 // CHECK27-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !13
27003 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
27004 // CHECK27:       omp.body.continue50:
27005 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
27006 // CHECK27:       omp.inner.for.inc51:
27007 // CHECK27-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
27008 // CHECK27-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP30]], 1
27009 // CHECK27-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
27010 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP14:![0-9]+]]
27011 // CHECK27:       omp.inner.for.end53:
27012 // CHECK27-NEXT:    store i32 10, i32* [[I40]], align 4
27013 // CHECK27-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
27014 // CHECK27-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
27015 // CHECK27-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
27016 // CHECK27-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV57]], align 4
27017 // CHECK27-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 0
27018 // CHECK27-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
27019 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
27020 // CHECK27:       omp.inner.for.cond59:
27021 // CHECK27-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
27022 // CHECK27-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !16
27023 // CHECK27-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP32]], [[TMP33]]
27024 // CHECK27-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
27025 // CHECK27:       omp.inner.for.body61:
27026 // CHECK27-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
27027 // CHECK27-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP34]], 1
27028 // CHECK27-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
27029 // CHECK27-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !16
27030 // CHECK27-NEXT:    [[TMP35:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
27031 // CHECK27-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP35]], 1
27032 // CHECK27-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !16
27033 // CHECK27-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
27034 // CHECK27-NEXT:    [[TMP36:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
27035 // CHECK27-NEXT:    [[CONV65:%.*]] = fpext float [[TMP36]] to double
27036 // CHECK27-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
27037 // CHECK27-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
27038 // CHECK27-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
27039 // CHECK27-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
27040 // CHECK27-NEXT:    [[TMP37:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
27041 // CHECK27-NEXT:    [[CONV69:%.*]] = fpext float [[TMP37]] to double
27042 // CHECK27-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
27043 // CHECK27-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
27044 // CHECK27-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
27045 // CHECK27-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
27046 // CHECK27-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i32 0, i32 2
27047 // CHECK27-NEXT:    [[TMP38:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
27048 // CHECK27-NEXT:    [[ADD74:%.*]] = fadd double [[TMP38]], 1.000000e+00
27049 // CHECK27-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
27050 // CHECK27-NEXT:    [[TMP39:%.*]] = mul nsw i32 1, [[TMP2]]
27051 // CHECK27-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP39]]
27052 // CHECK27-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i32 3
27053 // CHECK27-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
27054 // CHECK27-NEXT:    [[ADD77:%.*]] = fadd double [[TMP40]], 1.000000e+00
27055 // CHECK27-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
27056 // CHECK27-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
27057 // CHECK27-NEXT:    [[TMP41:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
27058 // CHECK27-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP41]], 1
27059 // CHECK27-NEXT:    store i64 [[ADD78]], i64* [[X]], align 4, !llvm.access.group !16
27060 // CHECK27-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
27061 // CHECK27-NEXT:    [[TMP42:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
27062 // CHECK27-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP42]] to i32
27063 // CHECK27-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
27064 // CHECK27-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
27065 // CHECK27-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 4, !llvm.access.group !16
27066 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
27067 // CHECK27:       omp.body.continue82:
27068 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
27069 // CHECK27:       omp.inner.for.inc83:
27070 // CHECK27-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
27071 // CHECK27-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP43]], 1
27072 // CHECK27-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
27073 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP17:![0-9]+]]
27074 // CHECK27:       omp.inner.for.end85:
27075 // CHECK27-NEXT:    store i32 10, i32* [[I58]], align 4
27076 // CHECK27-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
27077 // CHECK27-NEXT:    [[TMP45:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
27078 // CHECK27-NEXT:    call void @llvm.stackrestore(i8* [[TMP45]])
27079 // CHECK27-NEXT:    ret i32 [[TMP44]]
27080 //
27081 //
27082 // CHECK27-LABEL: define {{[^@]+}}@_Z3bari
27083 // CHECK27-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
27084 // CHECK27-NEXT:  entry:
27085 // CHECK27-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27086 // CHECK27-NEXT:    [[A:%.*]] = alloca i32, align 4
27087 // CHECK27-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
27088 // CHECK27-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27089 // CHECK27-NEXT:    store i32 0, i32* [[A]], align 4
27090 // CHECK27-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
27091 // CHECK27-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
27092 // CHECK27-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
27093 // CHECK27-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
27094 // CHECK27-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
27095 // CHECK27-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
27096 // CHECK27-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef [[TMP2]])
27097 // CHECK27-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
27098 // CHECK27-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
27099 // CHECK27-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
27100 // CHECK27-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
27101 // CHECK27-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
27102 // CHECK27-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
27103 // CHECK27-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
27104 // CHECK27-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
27105 // CHECK27-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
27106 // CHECK27-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
27107 // CHECK27-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
27108 // CHECK27-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
27109 // CHECK27-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
27110 // CHECK27-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
27111 // CHECK27-NEXT:    ret i32 [[TMP8]]
27112 //
27113 //
27114 // CHECK27-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
27115 // CHECK27-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
27116 // CHECK27-NEXT:  entry:
27117 // CHECK27-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
27118 // CHECK27-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27119 // CHECK27-NEXT:    [[B:%.*]] = alloca i32, align 4
27120 // CHECK27-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
27121 // CHECK27-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
27122 // CHECK27-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27123 // CHECK27-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27124 // CHECK27-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27125 // CHECK27-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27126 // CHECK27-NEXT:    [[I:%.*]] = alloca i32, align 4
27127 // CHECK27-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
27128 // CHECK27-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27129 // CHECK27-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
27130 // CHECK27-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
27131 // CHECK27-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
27132 // CHECK27-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
27133 // CHECK27-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
27134 // CHECK27-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
27135 // CHECK27-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
27136 // CHECK27-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
27137 // CHECK27-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
27138 // CHECK27-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
27139 // CHECK27-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27140 // CHECK27-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
27141 // CHECK27-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27142 // CHECK27-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
27143 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27144 // CHECK27:       omp.inner.for.cond:
27145 // CHECK27-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
27146 // CHECK27-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
27147 // CHECK27-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
27148 // CHECK27-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27149 // CHECK27:       omp.inner.for.body:
27150 // CHECK27-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
27151 // CHECK27-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
27152 // CHECK27-NEXT:    [[ADD2:%.*]] = add nsw i32 0, [[MUL]]
27153 // CHECK27-NEXT:    store i32 [[ADD2]], i32* [[I]], align 4, !llvm.access.group !19
27154 // CHECK27-NEXT:    [[TMP8:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
27155 // CHECK27-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP8]] to double
27156 // CHECK27-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
27157 // CHECK27-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
27158 // CHECK27-NEXT:    store double [[ADD3]], double* [[A]], align 4, !llvm.access.group !19
27159 // CHECK27-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
27160 // CHECK27-NEXT:    [[TMP9:%.*]] = load double, double* [[A4]], align 4, !llvm.access.group !19
27161 // CHECK27-NEXT:    [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00
27162 // CHECK27-NEXT:    store double [[INC]], double* [[A4]], align 4, !llvm.access.group !19
27163 // CHECK27-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
27164 // CHECK27-NEXT:    [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]]
27165 // CHECK27-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP10]]
27166 // CHECK27-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
27167 // CHECK27-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !19
27168 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27169 // CHECK27:       omp.body.continue:
27170 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27171 // CHECK27:       omp.inner.for.inc:
27172 // CHECK27-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
27173 // CHECK27-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
27174 // CHECK27-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
27175 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
27176 // CHECK27:       omp.inner.for.end:
27177 // CHECK27-NEXT:    store i32 10, i32* [[I]], align 4
27178 // CHECK27-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
27179 // CHECK27-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
27180 // CHECK27-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX8]], i32 1
27181 // CHECK27-NEXT:    [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX9]], align 2
27182 // CHECK27-NEXT:    [[CONV10:%.*]] = sext i16 [[TMP13]] to i32
27183 // CHECK27-NEXT:    [[TMP14:%.*]] = load i32, i32* [[B]], align 4
27184 // CHECK27-NEXT:    [[ADD11:%.*]] = add nsw i32 [[CONV10]], [[TMP14]]
27185 // CHECK27-NEXT:    [[TMP15:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
27186 // CHECK27-NEXT:    call void @llvm.stackrestore(i8* [[TMP15]])
27187 // CHECK27-NEXT:    ret i32 [[ADD11]]
27188 //
27189 //
27190 // CHECK27-LABEL: define {{[^@]+}}@_ZL7fstatici
27191 // CHECK27-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
27192 // CHECK27-NEXT:  entry:
27193 // CHECK27-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27194 // CHECK27-NEXT:    [[A:%.*]] = alloca i32, align 4
27195 // CHECK27-NEXT:    [[AA:%.*]] = alloca i16, align 2
27196 // CHECK27-NEXT:    [[AAA:%.*]] = alloca i8, align 1
27197 // CHECK27-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
27198 // CHECK27-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27199 // CHECK27-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
27200 // CHECK27-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
27201 // CHECK27-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
27202 // CHECK27-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27203 // CHECK27-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27204 // CHECK27-NEXT:    [[I:%.*]] = alloca i32, align 4
27205 // CHECK27-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27206 // CHECK27-NEXT:    [[I5:%.*]] = alloca i32, align 4
27207 // CHECK27-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27208 // CHECK27-NEXT:    store i32 0, i32* [[A]], align 4
27209 // CHECK27-NEXT:    store i16 0, i16* [[AA]], align 2
27210 // CHECK27-NEXT:    store i8 0, i8* [[AAA]], align 1
27211 // CHECK27-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
27212 // CHECK27-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
27213 // CHECK27-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
27214 // CHECK27-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
27215 // CHECK27-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27216 // CHECK27-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27217 // CHECK27-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
27218 // CHECK27-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
27219 // CHECK27-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
27220 // CHECK27-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
27221 // CHECK27-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
27222 // CHECK27-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
27223 // CHECK27-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27224 // CHECK27-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
27225 // CHECK27-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
27226 // CHECK27-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27227 // CHECK27-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
27228 // CHECK27-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27229 // CHECK27-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27230 // CHECK27-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
27231 // CHECK27-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
27232 // CHECK27:       simd.if.then:
27233 // CHECK27-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27234 // CHECK27-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
27235 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27236 // CHECK27:       omp.inner.for.cond:
27237 // CHECK27-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
27238 // CHECK27-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
27239 // CHECK27-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
27240 // CHECK27-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
27241 // CHECK27-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27242 // CHECK27:       omp.inner.for.body:
27243 // CHECK27-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !22
27244 // CHECK27-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
27245 // CHECK27-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
27246 // CHECK27-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
27247 // CHECK27-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !22
27248 // CHECK27-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !22
27249 // CHECK27-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
27250 // CHECK27-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !22
27251 // CHECK27-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !22
27252 // CHECK27-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
27253 // CHECK27-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
27254 // CHECK27-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
27255 // CHECK27-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !22
27256 // CHECK27-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !22
27257 // CHECK27-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
27258 // CHECK27-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
27259 // CHECK27-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
27260 // CHECK27-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !22
27261 // CHECK27-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
27262 // CHECK27-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
27263 // CHECK27-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
27264 // CHECK27-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
27265 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27266 // CHECK27:       omp.body.continue:
27267 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27268 // CHECK27:       omp.inner.for.inc:
27269 // CHECK27-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
27270 // CHECK27-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
27271 // CHECK27-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
27272 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
27273 // CHECK27:       omp.inner.for.end:
27274 // CHECK27-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27275 // CHECK27-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27276 // CHECK27-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27277 // CHECK27-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
27278 // CHECK27-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
27279 // CHECK27-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
27280 // CHECK27-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
27281 // CHECK27-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
27282 // CHECK27-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
27283 // CHECK27-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
27284 // CHECK27-NEXT:    br label [[SIMD_IF_END]]
27285 // CHECK27:       simd.if.end:
27286 // CHECK27-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
27287 // CHECK27-NEXT:    ret i32 [[TMP21]]
27288 //
27289 //
27290 // CHECK27-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
27291 // CHECK27-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
27292 // CHECK27-NEXT:  entry:
27293 // CHECK27-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27294 // CHECK27-NEXT:    [[A:%.*]] = alloca i32, align 4
27295 // CHECK27-NEXT:    [[AA:%.*]] = alloca i16, align 2
27296 // CHECK27-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
27297 // CHECK27-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27298 // CHECK27-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27299 // CHECK27-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27300 // CHECK27-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27301 // CHECK27-NEXT:    [[I:%.*]] = alloca i32, align 4
27302 // CHECK27-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27303 // CHECK27-NEXT:    store i32 0, i32* [[A]], align 4
27304 // CHECK27-NEXT:    store i16 0, i16* [[AA]], align 2
27305 // CHECK27-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27306 // CHECK27-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
27307 // CHECK27-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27308 // CHECK27-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
27309 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27310 // CHECK27:       omp.inner.for.cond:
27311 // CHECK27-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
27312 // CHECK27-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
27313 // CHECK27-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
27314 // CHECK27-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27315 // CHECK27:       omp.inner.for.body:
27316 // CHECK27-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
27317 // CHECK27-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
27318 // CHECK27-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
27319 // CHECK27-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !25
27320 // CHECK27-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25
27321 // CHECK27-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
27322 // CHECK27-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !25
27323 // CHECK27-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25
27324 // CHECK27-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
27325 // CHECK27-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
27326 // CHECK27-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
27327 // CHECK27-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !25
27328 // CHECK27-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
27329 // CHECK27-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
27330 // CHECK27-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
27331 // CHECK27-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
27332 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27333 // CHECK27:       omp.body.continue:
27334 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27335 // CHECK27:       omp.inner.for.inc:
27336 // CHECK27-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
27337 // CHECK27-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
27338 // CHECK27-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
27339 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
27340 // CHECK27:       omp.inner.for.end:
27341 // CHECK27-NEXT:    store i32 10, i32* [[I]], align 4
27342 // CHECK27-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
27343 // CHECK27-NEXT:    ret i32 [[TMP8]]
27344 //
27345 //
27346 // CHECK28-LABEL: define {{[^@]+}}@_Z3fooi
27347 // CHECK28-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
27348 // CHECK28-NEXT:  entry:
27349 // CHECK28-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27350 // CHECK28-NEXT:    [[A:%.*]] = alloca i32, align 4
27351 // CHECK28-NEXT:    [[AA:%.*]] = alloca i16, align 2
27352 // CHECK28-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
27353 // CHECK28-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
27354 // CHECK28-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
27355 // CHECK28-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
27356 // CHECK28-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
27357 // CHECK28-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
27358 // CHECK28-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
27359 // CHECK28-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
27360 // CHECK28-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27361 // CHECK28-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27362 // CHECK28-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27363 // CHECK28-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27364 // CHECK28-NEXT:    [[I:%.*]] = alloca i32, align 4
27365 // CHECK28-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
27366 // CHECK28-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
27367 // CHECK28-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
27368 // CHECK28-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
27369 // CHECK28-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
27370 // CHECK28-NEXT:    [[A8:%.*]] = alloca i32, align 4
27371 // CHECK28-NEXT:    [[A9:%.*]] = alloca i32, align 4
27372 // CHECK28-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
27373 // CHECK28-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
27374 // CHECK28-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
27375 // CHECK28-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
27376 // CHECK28-NEXT:    [[I24:%.*]] = alloca i32, align 4
27377 // CHECK28-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
27378 // CHECK28-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
27379 // CHECK28-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
27380 // CHECK28-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
27381 // CHECK28-NEXT:    [[I40:%.*]] = alloca i32, align 4
27382 // CHECK28-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
27383 // CHECK28-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
27384 // CHECK28-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
27385 // CHECK28-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
27386 // CHECK28-NEXT:    [[I58:%.*]] = alloca i32, align 4
27387 // CHECK28-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27388 // CHECK28-NEXT:    store i32 0, i32* [[A]], align 4
27389 // CHECK28-NEXT:    store i16 0, i16* [[AA]], align 2
27390 // CHECK28-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
27391 // CHECK28-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
27392 // CHECK28-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
27393 // CHECK28-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
27394 // CHECK28-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
27395 // CHECK28-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
27396 // CHECK28-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
27397 // CHECK28-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
27398 // CHECK28-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
27399 // CHECK28-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4
27400 // CHECK28-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
27401 // CHECK28-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
27402 // CHECK28-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_2]], align 4
27403 // CHECK28-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27404 // CHECK28-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
27405 // CHECK28-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27406 // CHECK28-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
27407 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27408 // CHECK28:       omp.inner.for.cond:
27409 // CHECK28-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
27410 // CHECK28-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
27411 // CHECK28-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
27412 // CHECK28-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27413 // CHECK28:       omp.inner.for.body:
27414 // CHECK28-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
27415 // CHECK28-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
27416 // CHECK28-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
27417 // CHECK28-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
27418 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27419 // CHECK28:       omp.body.continue:
27420 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27421 // CHECK28:       omp.inner.for.inc:
27422 // CHECK28-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
27423 // CHECK28-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
27424 // CHECK28-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
27425 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
27426 // CHECK28:       omp.inner.for.end:
27427 // CHECK28-NEXT:    store i32 10, i32* [[I]], align 4
27428 // CHECK28-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
27429 // CHECK28-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
27430 // CHECK28-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
27431 // CHECK28-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV7]], align 4
27432 // CHECK28-NEXT:    [[TMP12:%.*]] = load i32, i32* [[A]], align 4
27433 // CHECK28-NEXT:    store i32 [[TMP12]], i32* [[DOTLINEAR_START]], align 4
27434 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
27435 // CHECK28:       omp.inner.for.cond10:
27436 // CHECK28-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
27437 // CHECK28-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
27438 // CHECK28-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
27439 // CHECK28-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
27440 // CHECK28:       omp.inner.for.body12:
27441 // CHECK28-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
27442 // CHECK28-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 1
27443 // CHECK28-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
27444 // CHECK28-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4
27445 // CHECK28-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A8]], align 4
27446 // CHECK28-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
27447 // CHECK28-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4
27448 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
27449 // CHECK28:       omp.body.continue16:
27450 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
27451 // CHECK28:       omp.inner.for.inc17:
27452 // CHECK28-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
27453 // CHECK28-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
27454 // CHECK28-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
27455 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP8:![0-9]+]]
27456 // CHECK28:       omp.inner.for.end19:
27457 // CHECK28-NEXT:    store i32 10, i32* [[A]], align 4
27458 // CHECK28-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
27459 // CHECK28-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
27460 // CHECK28-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
27461 // CHECK28-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV23]], align 4
27462 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
27463 // CHECK28:       omp.inner.for.cond25:
27464 // CHECK28-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
27465 // CHECK28-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !10
27466 // CHECK28-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
27467 // CHECK28-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
27468 // CHECK28:       omp.inner.for.body27:
27469 // CHECK28-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
27470 // CHECK28-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP21]], 1
27471 // CHECK28-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
27472 // CHECK28-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !10
27473 // CHECK28-NEXT:    [[TMP22:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
27474 // CHECK28-NEXT:    [[CONV:%.*]] = sext i16 [[TMP22]] to i32
27475 // CHECK28-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
27476 // CHECK28-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
27477 // CHECK28-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !10
27478 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
27479 // CHECK28:       omp.body.continue32:
27480 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
27481 // CHECK28:       omp.inner.for.inc33:
27482 // CHECK28-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
27483 // CHECK28-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP23]], 1
27484 // CHECK28-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
27485 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP11:![0-9]+]]
27486 // CHECK28:       omp.inner.for.end35:
27487 // CHECK28-NEXT:    store i32 10, i32* [[I24]], align 4
27488 // CHECK28-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
27489 // CHECK28-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
27490 // CHECK28-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
27491 // CHECK28-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV39]], align 4
27492 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
27493 // CHECK28:       omp.inner.for.cond41:
27494 // CHECK28-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
27495 // CHECK28-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !13
27496 // CHECK28-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
27497 // CHECK28-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
27498 // CHECK28:       omp.inner.for.body43:
27499 // CHECK28-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
27500 // CHECK28-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP27]], 1
27501 // CHECK28-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
27502 // CHECK28-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !13
27503 // CHECK28-NEXT:    [[TMP28:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
27504 // CHECK28-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP28]], 1
27505 // CHECK28-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !13
27506 // CHECK28-NEXT:    [[TMP29:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
27507 // CHECK28-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP29]] to i32
27508 // CHECK28-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
27509 // CHECK28-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
27510 // CHECK28-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !13
27511 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
27512 // CHECK28:       omp.body.continue50:
27513 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
27514 // CHECK28:       omp.inner.for.inc51:
27515 // CHECK28-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
27516 // CHECK28-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP30]], 1
27517 // CHECK28-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
27518 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP14:![0-9]+]]
27519 // CHECK28:       omp.inner.for.end53:
27520 // CHECK28-NEXT:    store i32 10, i32* [[I40]], align 4
27521 // CHECK28-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
27522 // CHECK28-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
27523 // CHECK28-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
27524 // CHECK28-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV57]], align 4
27525 // CHECK28-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 0
27526 // CHECK28-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
27527 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
27528 // CHECK28:       omp.inner.for.cond59:
27529 // CHECK28-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
27530 // CHECK28-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !16
27531 // CHECK28-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP32]], [[TMP33]]
27532 // CHECK28-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
27533 // CHECK28:       omp.inner.for.body61:
27534 // CHECK28-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
27535 // CHECK28-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP34]], 1
27536 // CHECK28-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
27537 // CHECK28-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !16
27538 // CHECK28-NEXT:    [[TMP35:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
27539 // CHECK28-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP35]], 1
27540 // CHECK28-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !16
27541 // CHECK28-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
27542 // CHECK28-NEXT:    [[TMP36:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
27543 // CHECK28-NEXT:    [[CONV65:%.*]] = fpext float [[TMP36]] to double
27544 // CHECK28-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
27545 // CHECK28-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
27546 // CHECK28-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
27547 // CHECK28-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
27548 // CHECK28-NEXT:    [[TMP37:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
27549 // CHECK28-NEXT:    [[CONV69:%.*]] = fpext float [[TMP37]] to double
27550 // CHECK28-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
27551 // CHECK28-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
27552 // CHECK28-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
27553 // CHECK28-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
27554 // CHECK28-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i32 0, i32 2
27555 // CHECK28-NEXT:    [[TMP38:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
27556 // CHECK28-NEXT:    [[ADD74:%.*]] = fadd double [[TMP38]], 1.000000e+00
27557 // CHECK28-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
27558 // CHECK28-NEXT:    [[TMP39:%.*]] = mul nsw i32 1, [[TMP2]]
27559 // CHECK28-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP39]]
27560 // CHECK28-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i32 3
27561 // CHECK28-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
27562 // CHECK28-NEXT:    [[ADD77:%.*]] = fadd double [[TMP40]], 1.000000e+00
27563 // CHECK28-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
27564 // CHECK28-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
27565 // CHECK28-NEXT:    [[TMP41:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
27566 // CHECK28-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP41]], 1
27567 // CHECK28-NEXT:    store i64 [[ADD78]], i64* [[X]], align 4, !llvm.access.group !16
27568 // CHECK28-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
27569 // CHECK28-NEXT:    [[TMP42:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
27570 // CHECK28-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP42]] to i32
27571 // CHECK28-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
27572 // CHECK28-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
27573 // CHECK28-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 4, !llvm.access.group !16
27574 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
27575 // CHECK28:       omp.body.continue82:
27576 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
27577 // CHECK28:       omp.inner.for.inc83:
27578 // CHECK28-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
27579 // CHECK28-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP43]], 1
27580 // CHECK28-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
27581 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP17:![0-9]+]]
27582 // CHECK28:       omp.inner.for.end85:
27583 // CHECK28-NEXT:    store i32 10, i32* [[I58]], align 4
27584 // CHECK28-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
27585 // CHECK28-NEXT:    [[TMP45:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
27586 // CHECK28-NEXT:    call void @llvm.stackrestore(i8* [[TMP45]])
27587 // CHECK28-NEXT:    ret i32 [[TMP44]]
27588 //
27589 //
27590 // CHECK28-LABEL: define {{[^@]+}}@_Z3bari
27591 // CHECK28-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
27592 // CHECK28-NEXT:  entry:
27593 // CHECK28-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27594 // CHECK28-NEXT:    [[A:%.*]] = alloca i32, align 4
27595 // CHECK28-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
27596 // CHECK28-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27597 // CHECK28-NEXT:    store i32 0, i32* [[A]], align 4
27598 // CHECK28-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
27599 // CHECK28-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
27600 // CHECK28-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
27601 // CHECK28-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
27602 // CHECK28-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
27603 // CHECK28-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
27604 // CHECK28-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef [[TMP2]])
27605 // CHECK28-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
27606 // CHECK28-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
27607 // CHECK28-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
27608 // CHECK28-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
27609 // CHECK28-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
27610 // CHECK28-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
27611 // CHECK28-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
27612 // CHECK28-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
27613 // CHECK28-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
27614 // CHECK28-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
27615 // CHECK28-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
27616 // CHECK28-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
27617 // CHECK28-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
27618 // CHECK28-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
27619 // CHECK28-NEXT:    ret i32 [[TMP8]]
27620 //
27621 //
27622 // CHECK28-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
27623 // CHECK28-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
27624 // CHECK28-NEXT:  entry:
27625 // CHECK28-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
27626 // CHECK28-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27627 // CHECK28-NEXT:    [[B:%.*]] = alloca i32, align 4
27628 // CHECK28-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
27629 // CHECK28-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
27630 // CHECK28-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27631 // CHECK28-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27632 // CHECK28-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27633 // CHECK28-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27634 // CHECK28-NEXT:    [[I:%.*]] = alloca i32, align 4
27635 // CHECK28-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
27636 // CHECK28-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27637 // CHECK28-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
27638 // CHECK28-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
27639 // CHECK28-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
27640 // CHECK28-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
27641 // CHECK28-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
27642 // CHECK28-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
27643 // CHECK28-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
27644 // CHECK28-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
27645 // CHECK28-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
27646 // CHECK28-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
27647 // CHECK28-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27648 // CHECK28-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
27649 // CHECK28-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27650 // CHECK28-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
27651 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27652 // CHECK28:       omp.inner.for.cond:
27653 // CHECK28-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
27654 // CHECK28-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
27655 // CHECK28-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
27656 // CHECK28-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27657 // CHECK28:       omp.inner.for.body:
27658 // CHECK28-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
27659 // CHECK28-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
27660 // CHECK28-NEXT:    [[ADD2:%.*]] = add nsw i32 0, [[MUL]]
27661 // CHECK28-NEXT:    store i32 [[ADD2]], i32* [[I]], align 4, !llvm.access.group !19
27662 // CHECK28-NEXT:    [[TMP8:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
27663 // CHECK28-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP8]] to double
27664 // CHECK28-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
27665 // CHECK28-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
27666 // CHECK28-NEXT:    store double [[ADD3]], double* [[A]], align 4, !llvm.access.group !19
27667 // CHECK28-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
27668 // CHECK28-NEXT:    [[TMP9:%.*]] = load double, double* [[A4]], align 4, !llvm.access.group !19
27669 // CHECK28-NEXT:    [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00
27670 // CHECK28-NEXT:    store double [[INC]], double* [[A4]], align 4, !llvm.access.group !19
27671 // CHECK28-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
27672 // CHECK28-NEXT:    [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]]
27673 // CHECK28-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP10]]
27674 // CHECK28-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
27675 // CHECK28-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !19
27676 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27677 // CHECK28:       omp.body.continue:
27678 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27679 // CHECK28:       omp.inner.for.inc:
27680 // CHECK28-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
27681 // CHECK28-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
27682 // CHECK28-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
27683 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
27684 // CHECK28:       omp.inner.for.end:
27685 // CHECK28-NEXT:    store i32 10, i32* [[I]], align 4
27686 // CHECK28-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
27687 // CHECK28-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
27688 // CHECK28-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX8]], i32 1
27689 // CHECK28-NEXT:    [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX9]], align 2
27690 // CHECK28-NEXT:    [[CONV10:%.*]] = sext i16 [[TMP13]] to i32
27691 // CHECK28-NEXT:    [[TMP14:%.*]] = load i32, i32* [[B]], align 4
27692 // CHECK28-NEXT:    [[ADD11:%.*]] = add nsw i32 [[CONV10]], [[TMP14]]
27693 // CHECK28-NEXT:    [[TMP15:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
27694 // CHECK28-NEXT:    call void @llvm.stackrestore(i8* [[TMP15]])
27695 // CHECK28-NEXT:    ret i32 [[ADD11]]
27696 //
27697 //
27698 // CHECK28-LABEL: define {{[^@]+}}@_ZL7fstatici
27699 // CHECK28-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
27700 // CHECK28-NEXT:  entry:
27701 // CHECK28-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27702 // CHECK28-NEXT:    [[A:%.*]] = alloca i32, align 4
27703 // CHECK28-NEXT:    [[AA:%.*]] = alloca i16, align 2
27704 // CHECK28-NEXT:    [[AAA:%.*]] = alloca i8, align 1
27705 // CHECK28-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
27706 // CHECK28-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27707 // CHECK28-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
27708 // CHECK28-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
27709 // CHECK28-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
27710 // CHECK28-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27711 // CHECK28-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27712 // CHECK28-NEXT:    [[I:%.*]] = alloca i32, align 4
27713 // CHECK28-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27714 // CHECK28-NEXT:    [[I5:%.*]] = alloca i32, align 4
27715 // CHECK28-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27716 // CHECK28-NEXT:    store i32 0, i32* [[A]], align 4
27717 // CHECK28-NEXT:    store i16 0, i16* [[AA]], align 2
27718 // CHECK28-NEXT:    store i8 0, i8* [[AAA]], align 1
27719 // CHECK28-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
27720 // CHECK28-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
27721 // CHECK28-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
27722 // CHECK28-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
27723 // CHECK28-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27724 // CHECK28-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27725 // CHECK28-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
27726 // CHECK28-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
27727 // CHECK28-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
27728 // CHECK28-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
27729 // CHECK28-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
27730 // CHECK28-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
27731 // CHECK28-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27732 // CHECK28-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
27733 // CHECK28-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
27734 // CHECK28-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27735 // CHECK28-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
27736 // CHECK28-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27737 // CHECK28-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27738 // CHECK28-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
27739 // CHECK28-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
27740 // CHECK28:       simd.if.then:
27741 // CHECK28-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27742 // CHECK28-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
27743 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27744 // CHECK28:       omp.inner.for.cond:
27745 // CHECK28-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
27746 // CHECK28-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
27747 // CHECK28-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
27748 // CHECK28-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
27749 // CHECK28-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27750 // CHECK28:       omp.inner.for.body:
27751 // CHECK28-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !22
27752 // CHECK28-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
27753 // CHECK28-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
27754 // CHECK28-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
27755 // CHECK28-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !22
27756 // CHECK28-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !22
27757 // CHECK28-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
27758 // CHECK28-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !22
27759 // CHECK28-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !22
27760 // CHECK28-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
27761 // CHECK28-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
27762 // CHECK28-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
27763 // CHECK28-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !22
27764 // CHECK28-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !22
27765 // CHECK28-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
27766 // CHECK28-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
27767 // CHECK28-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
27768 // CHECK28-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !22
27769 // CHECK28-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
27770 // CHECK28-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
27771 // CHECK28-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
27772 // CHECK28-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
27773 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27774 // CHECK28:       omp.body.continue:
27775 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27776 // CHECK28:       omp.inner.for.inc:
27777 // CHECK28-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
27778 // CHECK28-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
27779 // CHECK28-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
27780 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
27781 // CHECK28:       omp.inner.for.end:
27782 // CHECK28-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27783 // CHECK28-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
27784 // CHECK28-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
27785 // CHECK28-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
27786 // CHECK28-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
27787 // CHECK28-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
27788 // CHECK28-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
27789 // CHECK28-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
27790 // CHECK28-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
27791 // CHECK28-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
27792 // CHECK28-NEXT:    br label [[SIMD_IF_END]]
27793 // CHECK28:       simd.if.end:
27794 // CHECK28-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
27795 // CHECK28-NEXT:    ret i32 [[TMP21]]
27796 //
27797 //
27798 // CHECK28-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
27799 // CHECK28-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
27800 // CHECK28-NEXT:  entry:
27801 // CHECK28-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27802 // CHECK28-NEXT:    [[A:%.*]] = alloca i32, align 4
27803 // CHECK28-NEXT:    [[AA:%.*]] = alloca i16, align 2
27804 // CHECK28-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
27805 // CHECK28-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27806 // CHECK28-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27807 // CHECK28-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27808 // CHECK28-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27809 // CHECK28-NEXT:    [[I:%.*]] = alloca i32, align 4
27810 // CHECK28-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27811 // CHECK28-NEXT:    store i32 0, i32* [[A]], align 4
27812 // CHECK28-NEXT:    store i16 0, i16* [[AA]], align 2
27813 // CHECK28-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27814 // CHECK28-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
27815 // CHECK28-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27816 // CHECK28-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
27817 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27818 // CHECK28:       omp.inner.for.cond:
27819 // CHECK28-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
27820 // CHECK28-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
27821 // CHECK28-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
27822 // CHECK28-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27823 // CHECK28:       omp.inner.for.body:
27824 // CHECK28-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
27825 // CHECK28-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
27826 // CHECK28-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
27827 // CHECK28-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !25
27828 // CHECK28-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25
27829 // CHECK28-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
27830 // CHECK28-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !25
27831 // CHECK28-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25
27832 // CHECK28-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
27833 // CHECK28-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
27834 // CHECK28-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
27835 // CHECK28-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !25
27836 // CHECK28-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
27837 // CHECK28-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
27838 // CHECK28-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
27839 // CHECK28-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
27840 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27841 // CHECK28:       omp.body.continue:
27842 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27843 // CHECK28:       omp.inner.for.inc:
27844 // CHECK28-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
27845 // CHECK28-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
27846 // CHECK28-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
27847 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
27848 // CHECK28:       omp.inner.for.end:
27849 // CHECK28-NEXT:    store i32 10, i32* [[I]], align 4
27850 // CHECK28-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
27851 // CHECK28-NEXT:    ret i32 [[TMP8]]
27852 //
27853 //
27854 // CHECK29-LABEL: define {{[^@]+}}@_Z3fooi
27855 // CHECK29-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
27856 // CHECK29-NEXT:  entry:
27857 // CHECK29-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27858 // CHECK29-NEXT:    [[A:%.*]] = alloca i32, align 4
27859 // CHECK29-NEXT:    [[AA:%.*]] = alloca i16, align 2
27860 // CHECK29-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
27861 // CHECK29-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
27862 // CHECK29-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
27863 // CHECK29-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
27864 // CHECK29-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
27865 // CHECK29-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
27866 // CHECK29-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
27867 // CHECK29-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
27868 // CHECK29-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27869 // CHECK29-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27870 // CHECK29-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27871 // CHECK29-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27872 // CHECK29-NEXT:    [[I:%.*]] = alloca i32, align 4
27873 // CHECK29-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
27874 // CHECK29-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
27875 // CHECK29-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
27876 // CHECK29-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
27877 // CHECK29-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
27878 // CHECK29-NEXT:    [[A8:%.*]] = alloca i32, align 4
27879 // CHECK29-NEXT:    [[A9:%.*]] = alloca i32, align 4
27880 // CHECK29-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
27881 // CHECK29-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
27882 // CHECK29-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
27883 // CHECK29-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
27884 // CHECK29-NEXT:    [[I24:%.*]] = alloca i32, align 4
27885 // CHECK29-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
27886 // CHECK29-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
27887 // CHECK29-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
27888 // CHECK29-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
27889 // CHECK29-NEXT:    [[I40:%.*]] = alloca i32, align 4
27890 // CHECK29-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
27891 // CHECK29-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
27892 // CHECK29-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
27893 // CHECK29-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
27894 // CHECK29-NEXT:    [[I58:%.*]] = alloca i32, align 4
27895 // CHECK29-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27896 // CHECK29-NEXT:    store i32 0, i32* [[A]], align 4
27897 // CHECK29-NEXT:    store i16 0, i16* [[AA]], align 2
27898 // CHECK29-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
27899 // CHECK29-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
27900 // CHECK29-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
27901 // CHECK29-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
27902 // CHECK29-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
27903 // CHECK29-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
27904 // CHECK29-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
27905 // CHECK29-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
27906 // CHECK29-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
27907 // CHECK29-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
27908 // CHECK29-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
27909 // CHECK29-NEXT:    [[TMP6:%.*]] = load i32, i32* [[A]], align 4
27910 // CHECK29-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
27911 // CHECK29-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
27912 // CHECK29-NEXT:    store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_2]], align 4
27913 // CHECK29-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27914 // CHECK29-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
27915 // CHECK29-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27916 // CHECK29-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
27917 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27918 // CHECK29:       omp.inner.for.cond:
27919 // CHECK29-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
27920 // CHECK29-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
27921 // CHECK29-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
27922 // CHECK29-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27923 // CHECK29:       omp.inner.for.body:
27924 // CHECK29-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
27925 // CHECK29-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
27926 // CHECK29-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
27927 // CHECK29-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
27928 // CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27929 // CHECK29:       omp.body.continue:
27930 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27931 // CHECK29:       omp.inner.for.inc:
27932 // CHECK29-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
27933 // CHECK29-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
27934 // CHECK29-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
27935 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
27936 // CHECK29:       omp.inner.for.end:
27937 // CHECK29-NEXT:    store i32 10, i32* [[I]], align 4
27938 // CHECK29-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
27939 // CHECK29-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
27940 // CHECK29-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
27941 // CHECK29-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV7]], align 4
27942 // CHECK29-NEXT:    [[TMP14:%.*]] = load i32, i32* [[A]], align 4
27943 // CHECK29-NEXT:    store i32 [[TMP14]], i32* [[DOTLINEAR_START]], align 4
27944 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
27945 // CHECK29:       omp.inner.for.cond10:
27946 // CHECK29-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
27947 // CHECK29-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
27948 // CHECK29-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
27949 // CHECK29-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
27950 // CHECK29:       omp.inner.for.body12:
27951 // CHECK29-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
27952 // CHECK29-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 1
27953 // CHECK29-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
27954 // CHECK29-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4, !nontemporal !7
27955 // CHECK29-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A8]], align 4, !nontemporal !7
27956 // CHECK29-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
27957 // CHECK29-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4, !nontemporal !7
27958 // CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
27959 // CHECK29:       omp.body.continue16:
27960 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
27961 // CHECK29:       omp.inner.for.inc17:
27962 // CHECK29-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
27963 // CHECK29-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
27964 // CHECK29-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
27965 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP8:![0-9]+]]
27966 // CHECK29:       omp.inner.for.end19:
27967 // CHECK29-NEXT:    store i32 10, i32* [[A]], align 4
27968 // CHECK29-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
27969 // CHECK29-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
27970 // CHECK29-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
27971 // CHECK29-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_IV23]], align 4
27972 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
27973 // CHECK29:       omp.inner.for.cond25:
27974 // CHECK29-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
27975 // CHECK29-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !10
27976 // CHECK29-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP21]], [[TMP22]]
27977 // CHECK29-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
27978 // CHECK29:       omp.inner.for.body27:
27979 // CHECK29-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
27980 // CHECK29-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP23]], 1
27981 // CHECK29-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
27982 // CHECK29-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !10
27983 // CHECK29-NEXT:    [[TMP24:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
27984 // CHECK29-NEXT:    [[CONV:%.*]] = sext i16 [[TMP24]] to i32
27985 // CHECK29-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
27986 // CHECK29-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
27987 // CHECK29-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !10
27988 // CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
27989 // CHECK29:       omp.body.continue32:
27990 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
27991 // CHECK29:       omp.inner.for.inc33:
27992 // CHECK29-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
27993 // CHECK29-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP25]], 1
27994 // CHECK29-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
27995 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP11:![0-9]+]]
27996 // CHECK29:       omp.inner.for.end35:
27997 // CHECK29-NEXT:    store i32 10, i32* [[I24]], align 4
27998 // CHECK29-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
27999 // CHECK29-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
28000 // CHECK29-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
28001 // CHECK29-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV39]], align 4
28002 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
28003 // CHECK29:       omp.inner.for.cond41:
28004 // CHECK29-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
28005 // CHECK29-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !13
28006 // CHECK29-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
28007 // CHECK29-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
28008 // CHECK29:       omp.inner.for.body43:
28009 // CHECK29-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
28010 // CHECK29-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP29]], 1
28011 // CHECK29-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
28012 // CHECK29-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !13
28013 // CHECK29-NEXT:    [[TMP30:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
28014 // CHECK29-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP30]], 1
28015 // CHECK29-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !13
28016 // CHECK29-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
28017 // CHECK29-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP31]] to i32
28018 // CHECK29-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
28019 // CHECK29-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
28020 // CHECK29-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !13
28021 // CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
28022 // CHECK29:       omp.body.continue50:
28023 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
28024 // CHECK29:       omp.inner.for.inc51:
28025 // CHECK29-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
28026 // CHECK29-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP32]], 1
28027 // CHECK29-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
28028 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP14:![0-9]+]]
28029 // CHECK29:       omp.inner.for.end53:
28030 // CHECK29-NEXT:    store i32 10, i32* [[I40]], align 4
28031 // CHECK29-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
28032 // CHECK29-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
28033 // CHECK29-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
28034 // CHECK29-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV57]], align 4
28035 // CHECK29-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 0
28036 // CHECK29-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
28037 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
28038 // CHECK29:       omp.inner.for.cond59:
28039 // CHECK29-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
28040 // CHECK29-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !16
28041 // CHECK29-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP34]], [[TMP35]]
28042 // CHECK29-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
28043 // CHECK29:       omp.inner.for.body61:
28044 // CHECK29-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
28045 // CHECK29-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP36]], 1
28046 // CHECK29-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
28047 // CHECK29-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !16
28048 // CHECK29-NEXT:    [[TMP37:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
28049 // CHECK29-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP37]], 1
28050 // CHECK29-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !16
28051 // CHECK29-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
28052 // CHECK29-NEXT:    [[TMP38:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
28053 // CHECK29-NEXT:    [[CONV65:%.*]] = fpext float [[TMP38]] to double
28054 // CHECK29-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
28055 // CHECK29-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
28056 // CHECK29-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
28057 // CHECK29-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
28058 // CHECK29-NEXT:    [[TMP39:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
28059 // CHECK29-NEXT:    [[CONV69:%.*]] = fpext float [[TMP39]] to double
28060 // CHECK29-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
28061 // CHECK29-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
28062 // CHECK29-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
28063 // CHECK29-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
28064 // CHECK29-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i64 0, i64 2
28065 // CHECK29-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
28066 // CHECK29-NEXT:    [[ADD74:%.*]] = fadd double [[TMP40]], 1.000000e+00
28067 // CHECK29-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
28068 // CHECK29-NEXT:    [[TMP41:%.*]] = mul nsw i64 1, [[TMP4]]
28069 // CHECK29-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP41]]
28070 // CHECK29-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i64 3
28071 // CHECK29-NEXT:    [[TMP42:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
28072 // CHECK29-NEXT:    [[ADD77:%.*]] = fadd double [[TMP42]], 1.000000e+00
28073 // CHECK29-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
28074 // CHECK29-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
28075 // CHECK29-NEXT:    [[TMP43:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !16
28076 // CHECK29-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP43]], 1
28077 // CHECK29-NEXT:    store i64 [[ADD78]], i64* [[X]], align 8, !llvm.access.group !16
28078 // CHECK29-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
28079 // CHECK29-NEXT:    [[TMP44:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !16
28080 // CHECK29-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP44]] to i32
28081 // CHECK29-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
28082 // CHECK29-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
28083 // CHECK29-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 8, !llvm.access.group !16
28084 // CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
28085 // CHECK29:       omp.body.continue82:
28086 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
28087 // CHECK29:       omp.inner.for.inc83:
28088 // CHECK29-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
28089 // CHECK29-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP45]], 1
28090 // CHECK29-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
28091 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP17:![0-9]+]]
28092 // CHECK29:       omp.inner.for.end85:
28093 // CHECK29-NEXT:    store i32 10, i32* [[I58]], align 4
28094 // CHECK29-NEXT:    [[TMP46:%.*]] = load i32, i32* [[A]], align 4
28095 // CHECK29-NEXT:    [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
28096 // CHECK29-NEXT:    call void @llvm.stackrestore(i8* [[TMP47]])
28097 // CHECK29-NEXT:    ret i32 [[TMP46]]
28098 //
28099 //
28100 // CHECK29-LABEL: define {{[^@]+}}@_Z3bari
28101 // CHECK29-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
28102 // CHECK29-NEXT:  entry:
28103 // CHECK29-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28104 // CHECK29-NEXT:    [[A:%.*]] = alloca i32, align 4
28105 // CHECK29-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
28106 // CHECK29-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28107 // CHECK29-NEXT:    store i32 0, i32* [[A]], align 4
28108 // CHECK29-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
28109 // CHECK29-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
28110 // CHECK29-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
28111 // CHECK29-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
28112 // CHECK29-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
28113 // CHECK29-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
28114 // CHECK29-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef signext [[TMP2]])
28115 // CHECK29-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
28116 // CHECK29-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
28117 // CHECK29-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
28118 // CHECK29-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
28119 // CHECK29-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
28120 // CHECK29-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
28121 // CHECK29-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
28122 // CHECK29-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
28123 // CHECK29-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
28124 // CHECK29-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
28125 // CHECK29-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
28126 // CHECK29-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
28127 // CHECK29-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
28128 // CHECK29-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
28129 // CHECK29-NEXT:    ret i32 [[TMP8]]
28130 //
28131 //
28132 // CHECK29-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
28133 // CHECK29-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
28134 // CHECK29-NEXT:  entry:
28135 // CHECK29-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
28136 // CHECK29-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28137 // CHECK29-NEXT:    [[B:%.*]] = alloca i32, align 4
28138 // CHECK29-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
28139 // CHECK29-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
28140 // CHECK29-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
28141 // CHECK29-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28142 // CHECK29-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28143 // CHECK29-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28144 // CHECK29-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28145 // CHECK29-NEXT:    [[I:%.*]] = alloca i32, align 4
28146 // CHECK29-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
28147 // CHECK29-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28148 // CHECK29-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
28149 // CHECK29-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
28150 // CHECK29-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
28151 // CHECK29-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
28152 // CHECK29-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
28153 // CHECK29-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
28154 // CHECK29-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
28155 // CHECK29-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
28156 // CHECK29-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
28157 // CHECK29-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
28158 // CHECK29-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
28159 // CHECK29-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
28160 // CHECK29-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
28161 // CHECK29-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
28162 // CHECK29-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
28163 // CHECK29-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28164 // CHECK29-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
28165 // CHECK29-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28166 // CHECK29-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
28167 // CHECK29-NEXT:    [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
28168 // CHECK29-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
28169 // CHECK29-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
28170 // CHECK29:       omp_if.then:
28171 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28172 // CHECK29:       omp.inner.for.cond:
28173 // CHECK29-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
28174 // CHECK29-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
28175 // CHECK29-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
28176 // CHECK29-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28177 // CHECK29:       omp.inner.for.body:
28178 // CHECK29-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
28179 // CHECK29-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
28180 // CHECK29-NEXT:    [[ADD3:%.*]] = add nsw i32 0, [[MUL]]
28181 // CHECK29-NEXT:    store i32 [[ADD3]], i32* [[I]], align 4, !llvm.access.group !19
28182 // CHECK29-NEXT:    [[TMP11:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
28183 // CHECK29-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP11]] to double
28184 // CHECK29-NEXT:    [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00
28185 // CHECK29-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
28186 // CHECK29-NEXT:    store double [[ADD4]], double* [[A]], align 8, !llvm.access.group !19
28187 // CHECK29-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
28188 // CHECK29-NEXT:    [[TMP12:%.*]] = load double, double* [[A5]], align 8, !llvm.access.group !19
28189 // CHECK29-NEXT:    [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00
28190 // CHECK29-NEXT:    store double [[INC]], double* [[A5]], align 8, !llvm.access.group !19
28191 // CHECK29-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
28192 // CHECK29-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
28193 // CHECK29-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
28194 // CHECK29-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
28195 // CHECK29-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !19
28196 // CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28197 // CHECK29:       omp.body.continue:
28198 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28199 // CHECK29:       omp.inner.for.inc:
28200 // CHECK29-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
28201 // CHECK29-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP14]], 1
28202 // CHECK29-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
28203 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
28204 // CHECK29:       omp.inner.for.end:
28205 // CHECK29-NEXT:    br label [[OMP_IF_END:%.*]]
28206 // CHECK29:       omp_if.else:
28207 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
28208 // CHECK29:       omp.inner.for.cond9:
28209 // CHECK29-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
28210 // CHECK29-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
28211 // CHECK29-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
28212 // CHECK29-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END25:%.*]]
28213 // CHECK29:       omp.inner.for.body11:
28214 // CHECK29-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
28215 // CHECK29-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP17]], 1
28216 // CHECK29-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
28217 // CHECK29-NEXT:    store i32 [[ADD13]], i32* [[I]], align 4
28218 // CHECK29-NEXT:    [[TMP18:%.*]] = load i32, i32* [[B]], align 4
28219 // CHECK29-NEXT:    [[CONV14:%.*]] = sitofp i32 [[TMP18]] to double
28220 // CHECK29-NEXT:    [[ADD15:%.*]] = fadd double [[CONV14]], 1.500000e+00
28221 // CHECK29-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
28222 // CHECK29-NEXT:    store double [[ADD15]], double* [[A16]], align 8
28223 // CHECK29-NEXT:    [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
28224 // CHECK29-NEXT:    [[TMP19:%.*]] = load double, double* [[A17]], align 8
28225 // CHECK29-NEXT:    [[INC18:%.*]] = fadd double [[TMP19]], 1.000000e+00
28226 // CHECK29-NEXT:    store double [[INC18]], double* [[A17]], align 8
28227 // CHECK29-NEXT:    [[CONV19:%.*]] = fptosi double [[INC18]] to i16
28228 // CHECK29-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP2]]
28229 // CHECK29-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP20]]
28230 // CHECK29-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX20]], i64 1
28231 // CHECK29-NEXT:    store i16 [[CONV19]], i16* [[ARRAYIDX21]], align 2
28232 // CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE22:%.*]]
28233 // CHECK29:       omp.body.continue22:
28234 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC23:%.*]]
28235 // CHECK29:       omp.inner.for.inc23:
28236 // CHECK29-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
28237 // CHECK29-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP21]], 1
28238 // CHECK29-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4
28239 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP22:![0-9]+]]
28240 // CHECK29:       omp.inner.for.end25:
28241 // CHECK29-NEXT:    br label [[OMP_IF_END]]
28242 // CHECK29:       omp_if.end:
28243 // CHECK29-NEXT:    store i32 10, i32* [[I]], align 4
28244 // CHECK29-NEXT:    [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]]
28245 // CHECK29-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP22]]
28246 // CHECK29-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i64 1
28247 // CHECK29-NEXT:    [[TMP23:%.*]] = load i16, i16* [[ARRAYIDX27]], align 2
28248 // CHECK29-NEXT:    [[CONV28:%.*]] = sext i16 [[TMP23]] to i32
28249 // CHECK29-NEXT:    [[TMP24:%.*]] = load i32, i32* [[B]], align 4
28250 // CHECK29-NEXT:    [[ADD29:%.*]] = add nsw i32 [[CONV28]], [[TMP24]]
28251 // CHECK29-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
28252 // CHECK29-NEXT:    call void @llvm.stackrestore(i8* [[TMP25]])
28253 // CHECK29-NEXT:    ret i32 [[ADD29]]
28254 //
28255 //
28256 // CHECK29-LABEL: define {{[^@]+}}@_ZL7fstatici
28257 // CHECK29-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
28258 // CHECK29-NEXT:  entry:
28259 // CHECK29-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28260 // CHECK29-NEXT:    [[A:%.*]] = alloca i32, align 4
28261 // CHECK29-NEXT:    [[AA:%.*]] = alloca i16, align 2
28262 // CHECK29-NEXT:    [[AAA:%.*]] = alloca i8, align 1
28263 // CHECK29-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
28264 // CHECK29-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28265 // CHECK29-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
28266 // CHECK29-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
28267 // CHECK29-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
28268 // CHECK29-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28269 // CHECK29-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28270 // CHECK29-NEXT:    [[I:%.*]] = alloca i32, align 4
28271 // CHECK29-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28272 // CHECK29-NEXT:    [[I5:%.*]] = alloca i32, align 4
28273 // CHECK29-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28274 // CHECK29-NEXT:    store i32 0, i32* [[A]], align 4
28275 // CHECK29-NEXT:    store i16 0, i16* [[AA]], align 2
28276 // CHECK29-NEXT:    store i8 0, i8* [[AAA]], align 1
28277 // CHECK29-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
28278 // CHECK29-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
28279 // CHECK29-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
28280 // CHECK29-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
28281 // CHECK29-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28282 // CHECK29-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28283 // CHECK29-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
28284 // CHECK29-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
28285 // CHECK29-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
28286 // CHECK29-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
28287 // CHECK29-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
28288 // CHECK29-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
28289 // CHECK29-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28290 // CHECK29-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
28291 // CHECK29-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
28292 // CHECK29-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28293 // CHECK29-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
28294 // CHECK29-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28295 // CHECK29-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28296 // CHECK29-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
28297 // CHECK29-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
28298 // CHECK29:       simd.if.then:
28299 // CHECK29-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28300 // CHECK29-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
28301 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28302 // CHECK29:       omp.inner.for.cond:
28303 // CHECK29-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28304 // CHECK29-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
28305 // CHECK29-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
28306 // CHECK29-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
28307 // CHECK29-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28308 // CHECK29:       omp.inner.for.body:
28309 // CHECK29-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !24
28310 // CHECK29-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28311 // CHECK29-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
28312 // CHECK29-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
28313 // CHECK29-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !24
28314 // CHECK29-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24
28315 // CHECK29-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
28316 // CHECK29-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !24
28317 // CHECK29-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24
28318 // CHECK29-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
28319 // CHECK29-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
28320 // CHECK29-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
28321 // CHECK29-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !24
28322 // CHECK29-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !24
28323 // CHECK29-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
28324 // CHECK29-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
28325 // CHECK29-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
28326 // CHECK29-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !24
28327 // CHECK29-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
28328 // CHECK29-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
28329 // CHECK29-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
28330 // CHECK29-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
28331 // CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28332 // CHECK29:       omp.body.continue:
28333 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28334 // CHECK29:       omp.inner.for.inc:
28335 // CHECK29-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28336 // CHECK29-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
28337 // CHECK29-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28338 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
28339 // CHECK29:       omp.inner.for.end:
28340 // CHECK29-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28341 // CHECK29-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28342 // CHECK29-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28343 // CHECK29-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
28344 // CHECK29-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
28345 // CHECK29-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
28346 // CHECK29-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
28347 // CHECK29-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
28348 // CHECK29-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
28349 // CHECK29-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
28350 // CHECK29-NEXT:    br label [[SIMD_IF_END]]
28351 // CHECK29:       simd.if.end:
28352 // CHECK29-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
28353 // CHECK29-NEXT:    ret i32 [[TMP21]]
28354 //
28355 //
28356 // CHECK29-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
28357 // CHECK29-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
28358 // CHECK29-NEXT:  entry:
28359 // CHECK29-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28360 // CHECK29-NEXT:    [[A:%.*]] = alloca i32, align 4
28361 // CHECK29-NEXT:    [[AA:%.*]] = alloca i16, align 2
28362 // CHECK29-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
28363 // CHECK29-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28364 // CHECK29-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28365 // CHECK29-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28366 // CHECK29-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28367 // CHECK29-NEXT:    [[I:%.*]] = alloca i32, align 4
28368 // CHECK29-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28369 // CHECK29-NEXT:    store i32 0, i32* [[A]], align 4
28370 // CHECK29-NEXT:    store i16 0, i16* [[AA]], align 2
28371 // CHECK29-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28372 // CHECK29-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
28373 // CHECK29-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28374 // CHECK29-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
28375 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28376 // CHECK29:       omp.inner.for.cond:
28377 // CHECK29-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
28378 // CHECK29-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
28379 // CHECK29-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
28380 // CHECK29-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28381 // CHECK29:       omp.inner.for.body:
28382 // CHECK29-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
28383 // CHECK29-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
28384 // CHECK29-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
28385 // CHECK29-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !27
28386 // CHECK29-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !27
28387 // CHECK29-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
28388 // CHECK29-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !27
28389 // CHECK29-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !27
28390 // CHECK29-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
28391 // CHECK29-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
28392 // CHECK29-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
28393 // CHECK29-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !27
28394 // CHECK29-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
28395 // CHECK29-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
28396 // CHECK29-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
28397 // CHECK29-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
28398 // CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28399 // CHECK29:       omp.body.continue:
28400 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28401 // CHECK29:       omp.inner.for.inc:
28402 // CHECK29-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
28403 // CHECK29-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
28404 // CHECK29-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
28405 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
28406 // CHECK29:       omp.inner.for.end:
28407 // CHECK29-NEXT:    store i32 10, i32* [[I]], align 4
28408 // CHECK29-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
28409 // CHECK29-NEXT:    ret i32 [[TMP8]]
28410 //
28411 //
28412 // CHECK30-LABEL: define {{[^@]+}}@_Z3fooi
28413 // CHECK30-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
28414 // CHECK30-NEXT:  entry:
28415 // CHECK30-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28416 // CHECK30-NEXT:    [[A:%.*]] = alloca i32, align 4
28417 // CHECK30-NEXT:    [[AA:%.*]] = alloca i16, align 2
28418 // CHECK30-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
28419 // CHECK30-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
28420 // CHECK30-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
28421 // CHECK30-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
28422 // CHECK30-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
28423 // CHECK30-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
28424 // CHECK30-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
28425 // CHECK30-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
28426 // CHECK30-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28427 // CHECK30-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28428 // CHECK30-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28429 // CHECK30-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28430 // CHECK30-NEXT:    [[I:%.*]] = alloca i32, align 4
28431 // CHECK30-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
28432 // CHECK30-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
28433 // CHECK30-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
28434 // CHECK30-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
28435 // CHECK30-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
28436 // CHECK30-NEXT:    [[A8:%.*]] = alloca i32, align 4
28437 // CHECK30-NEXT:    [[A9:%.*]] = alloca i32, align 4
28438 // CHECK30-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
28439 // CHECK30-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
28440 // CHECK30-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
28441 // CHECK30-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
28442 // CHECK30-NEXT:    [[I24:%.*]] = alloca i32, align 4
28443 // CHECK30-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
28444 // CHECK30-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
28445 // CHECK30-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
28446 // CHECK30-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
28447 // CHECK30-NEXT:    [[I40:%.*]] = alloca i32, align 4
28448 // CHECK30-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
28449 // CHECK30-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
28450 // CHECK30-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
28451 // CHECK30-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
28452 // CHECK30-NEXT:    [[I58:%.*]] = alloca i32, align 4
28453 // CHECK30-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28454 // CHECK30-NEXT:    store i32 0, i32* [[A]], align 4
28455 // CHECK30-NEXT:    store i16 0, i16* [[AA]], align 2
28456 // CHECK30-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
28457 // CHECK30-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
28458 // CHECK30-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
28459 // CHECK30-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
28460 // CHECK30-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
28461 // CHECK30-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
28462 // CHECK30-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
28463 // CHECK30-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
28464 // CHECK30-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
28465 // CHECK30-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
28466 // CHECK30-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
28467 // CHECK30-NEXT:    [[TMP6:%.*]] = load i32, i32* [[A]], align 4
28468 // CHECK30-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_]], align 4
28469 // CHECK30-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
28470 // CHECK30-NEXT:    store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_2]], align 4
28471 // CHECK30-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28472 // CHECK30-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
28473 // CHECK30-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28474 // CHECK30-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
28475 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28476 // CHECK30:       omp.inner.for.cond:
28477 // CHECK30-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
28478 // CHECK30-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
28479 // CHECK30-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
28480 // CHECK30-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28481 // CHECK30:       omp.inner.for.body:
28482 // CHECK30-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
28483 // CHECK30-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
28484 // CHECK30-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
28485 // CHECK30-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
28486 // CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28487 // CHECK30:       omp.body.continue:
28488 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28489 // CHECK30:       omp.inner.for.inc:
28490 // CHECK30-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
28491 // CHECK30-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
28492 // CHECK30-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
28493 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
28494 // CHECK30:       omp.inner.for.end:
28495 // CHECK30-NEXT:    store i32 10, i32* [[I]], align 4
28496 // CHECK30-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
28497 // CHECK30-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
28498 // CHECK30-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
28499 // CHECK30-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV7]], align 4
28500 // CHECK30-NEXT:    [[TMP14:%.*]] = load i32, i32* [[A]], align 4
28501 // CHECK30-NEXT:    store i32 [[TMP14]], i32* [[DOTLINEAR_START]], align 4
28502 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
28503 // CHECK30:       omp.inner.for.cond10:
28504 // CHECK30-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
28505 // CHECK30-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
28506 // CHECK30-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
28507 // CHECK30-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
28508 // CHECK30:       omp.inner.for.body12:
28509 // CHECK30-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
28510 // CHECK30-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 1
28511 // CHECK30-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
28512 // CHECK30-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4, !nontemporal !7
28513 // CHECK30-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A8]], align 4, !nontemporal !7
28514 // CHECK30-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
28515 // CHECK30-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4, !nontemporal !7
28516 // CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
28517 // CHECK30:       omp.body.continue16:
28518 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
28519 // CHECK30:       omp.inner.for.inc17:
28520 // CHECK30-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
28521 // CHECK30-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
28522 // CHECK30-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
28523 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP8:![0-9]+]]
28524 // CHECK30:       omp.inner.for.end19:
28525 // CHECK30-NEXT:    store i32 10, i32* [[A]], align 4
28526 // CHECK30-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
28527 // CHECK30-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
28528 // CHECK30-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
28529 // CHECK30-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_IV23]], align 4
28530 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
28531 // CHECK30:       omp.inner.for.cond25:
28532 // CHECK30-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
28533 // CHECK30-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !10
28534 // CHECK30-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP21]], [[TMP22]]
28535 // CHECK30-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
28536 // CHECK30:       omp.inner.for.body27:
28537 // CHECK30-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
28538 // CHECK30-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP23]], 1
28539 // CHECK30-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
28540 // CHECK30-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !10
28541 // CHECK30-NEXT:    [[TMP24:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
28542 // CHECK30-NEXT:    [[CONV:%.*]] = sext i16 [[TMP24]] to i32
28543 // CHECK30-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
28544 // CHECK30-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
28545 // CHECK30-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !10
28546 // CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
28547 // CHECK30:       omp.body.continue32:
28548 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
28549 // CHECK30:       omp.inner.for.inc33:
28550 // CHECK30-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
28551 // CHECK30-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP25]], 1
28552 // CHECK30-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !10
28553 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP11:![0-9]+]]
28554 // CHECK30:       omp.inner.for.end35:
28555 // CHECK30-NEXT:    store i32 10, i32* [[I24]], align 4
28556 // CHECK30-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
28557 // CHECK30-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
28558 // CHECK30-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
28559 // CHECK30-NEXT:    store i32 [[TMP26]], i32* [[DOTOMP_IV39]], align 4
28560 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
28561 // CHECK30:       omp.inner.for.cond41:
28562 // CHECK30-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
28563 // CHECK30-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !13
28564 // CHECK30-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
28565 // CHECK30-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
28566 // CHECK30:       omp.inner.for.body43:
28567 // CHECK30-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
28568 // CHECK30-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP29]], 1
28569 // CHECK30-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
28570 // CHECK30-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !13
28571 // CHECK30-NEXT:    [[TMP30:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
28572 // CHECK30-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP30]], 1
28573 // CHECK30-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !13
28574 // CHECK30-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
28575 // CHECK30-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP31]] to i32
28576 // CHECK30-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
28577 // CHECK30-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
28578 // CHECK30-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !13
28579 // CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
28580 // CHECK30:       omp.body.continue50:
28581 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
28582 // CHECK30:       omp.inner.for.inc51:
28583 // CHECK30-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
28584 // CHECK30-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP32]], 1
28585 // CHECK30-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !13
28586 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP14:![0-9]+]]
28587 // CHECK30:       omp.inner.for.end53:
28588 // CHECK30-NEXT:    store i32 10, i32* [[I40]], align 4
28589 // CHECK30-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
28590 // CHECK30-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
28591 // CHECK30-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
28592 // CHECK30-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV57]], align 4
28593 // CHECK30-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 0
28594 // CHECK30-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i64 16) ]
28595 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
28596 // CHECK30:       omp.inner.for.cond59:
28597 // CHECK30-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
28598 // CHECK30-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !16
28599 // CHECK30-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP34]], [[TMP35]]
28600 // CHECK30-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
28601 // CHECK30:       omp.inner.for.body61:
28602 // CHECK30-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
28603 // CHECK30-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP36]], 1
28604 // CHECK30-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
28605 // CHECK30-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !16
28606 // CHECK30-NEXT:    [[TMP37:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
28607 // CHECK30-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP37]], 1
28608 // CHECK30-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !16
28609 // CHECK30-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
28610 // CHECK30-NEXT:    [[TMP38:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
28611 // CHECK30-NEXT:    [[CONV65:%.*]] = fpext float [[TMP38]] to double
28612 // CHECK30-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
28613 // CHECK30-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
28614 // CHECK30-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
28615 // CHECK30-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
28616 // CHECK30-NEXT:    [[TMP39:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
28617 // CHECK30-NEXT:    [[CONV69:%.*]] = fpext float [[TMP39]] to double
28618 // CHECK30-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
28619 // CHECK30-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
28620 // CHECK30-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !16
28621 // CHECK30-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
28622 // CHECK30-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i64 0, i64 2
28623 // CHECK30-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
28624 // CHECK30-NEXT:    [[ADD74:%.*]] = fadd double [[TMP40]], 1.000000e+00
28625 // CHECK30-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !16
28626 // CHECK30-NEXT:    [[TMP41:%.*]] = mul nsw i64 1, [[TMP4]]
28627 // CHECK30-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP41]]
28628 // CHECK30-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i64 3
28629 // CHECK30-NEXT:    [[TMP42:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
28630 // CHECK30-NEXT:    [[ADD77:%.*]] = fadd double [[TMP42]], 1.000000e+00
28631 // CHECK30-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !16
28632 // CHECK30-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
28633 // CHECK30-NEXT:    [[TMP43:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !16
28634 // CHECK30-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP43]], 1
28635 // CHECK30-NEXT:    store i64 [[ADD78]], i64* [[X]], align 8, !llvm.access.group !16
28636 // CHECK30-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
28637 // CHECK30-NEXT:    [[TMP44:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !16
28638 // CHECK30-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP44]] to i32
28639 // CHECK30-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
28640 // CHECK30-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
28641 // CHECK30-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 8, !llvm.access.group !16
28642 // CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
28643 // CHECK30:       omp.body.continue82:
28644 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
28645 // CHECK30:       omp.inner.for.inc83:
28646 // CHECK30-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
28647 // CHECK30-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP45]], 1
28648 // CHECK30-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !16
28649 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP17:![0-9]+]]
28650 // CHECK30:       omp.inner.for.end85:
28651 // CHECK30-NEXT:    store i32 10, i32* [[I58]], align 4
28652 // CHECK30-NEXT:    [[TMP46:%.*]] = load i32, i32* [[A]], align 4
28653 // CHECK30-NEXT:    [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
28654 // CHECK30-NEXT:    call void @llvm.stackrestore(i8* [[TMP47]])
28655 // CHECK30-NEXT:    ret i32 [[TMP46]]
28656 //
28657 //
28658 // CHECK30-LABEL: define {{[^@]+}}@_Z3bari
28659 // CHECK30-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
28660 // CHECK30-NEXT:  entry:
28661 // CHECK30-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28662 // CHECK30-NEXT:    [[A:%.*]] = alloca i32, align 4
28663 // CHECK30-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
28664 // CHECK30-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28665 // CHECK30-NEXT:    store i32 0, i32* [[A]], align 4
28666 // CHECK30-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
28667 // CHECK30-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
28668 // CHECK30-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
28669 // CHECK30-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
28670 // CHECK30-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
28671 // CHECK30-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
28672 // CHECK30-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef signext [[TMP2]])
28673 // CHECK30-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
28674 // CHECK30-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
28675 // CHECK30-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
28676 // CHECK30-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
28677 // CHECK30-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
28678 // CHECK30-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
28679 // CHECK30-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
28680 // CHECK30-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
28681 // CHECK30-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
28682 // CHECK30-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
28683 // CHECK30-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
28684 // CHECK30-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
28685 // CHECK30-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
28686 // CHECK30-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
28687 // CHECK30-NEXT:    ret i32 [[TMP8]]
28688 //
28689 //
28690 // CHECK30-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
28691 // CHECK30-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
28692 // CHECK30-NEXT:  entry:
28693 // CHECK30-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
28694 // CHECK30-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28695 // CHECK30-NEXT:    [[B:%.*]] = alloca i32, align 4
28696 // CHECK30-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
28697 // CHECK30-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
28698 // CHECK30-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
28699 // CHECK30-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28700 // CHECK30-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28701 // CHECK30-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28702 // CHECK30-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28703 // CHECK30-NEXT:    [[I:%.*]] = alloca i32, align 4
28704 // CHECK30-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
28705 // CHECK30-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28706 // CHECK30-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
28707 // CHECK30-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
28708 // CHECK30-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
28709 // CHECK30-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
28710 // CHECK30-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
28711 // CHECK30-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
28712 // CHECK30-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
28713 // CHECK30-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
28714 // CHECK30-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
28715 // CHECK30-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
28716 // CHECK30-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
28717 // CHECK30-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
28718 // CHECK30-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
28719 // CHECK30-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
28720 // CHECK30-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
28721 // CHECK30-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28722 // CHECK30-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
28723 // CHECK30-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28724 // CHECK30-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
28725 // CHECK30-NEXT:    [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
28726 // CHECK30-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
28727 // CHECK30-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
28728 // CHECK30:       omp_if.then:
28729 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28730 // CHECK30:       omp.inner.for.cond:
28731 // CHECK30-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
28732 // CHECK30-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
28733 // CHECK30-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
28734 // CHECK30-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28735 // CHECK30:       omp.inner.for.body:
28736 // CHECK30-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
28737 // CHECK30-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
28738 // CHECK30-NEXT:    [[ADD3:%.*]] = add nsw i32 0, [[MUL]]
28739 // CHECK30-NEXT:    store i32 [[ADD3]], i32* [[I]], align 4, !llvm.access.group !19
28740 // CHECK30-NEXT:    [[TMP11:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
28741 // CHECK30-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP11]] to double
28742 // CHECK30-NEXT:    [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00
28743 // CHECK30-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
28744 // CHECK30-NEXT:    store double [[ADD4]], double* [[A]], align 8, !llvm.access.group !19
28745 // CHECK30-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
28746 // CHECK30-NEXT:    [[TMP12:%.*]] = load double, double* [[A5]], align 8, !llvm.access.group !19
28747 // CHECK30-NEXT:    [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00
28748 // CHECK30-NEXT:    store double [[INC]], double* [[A5]], align 8, !llvm.access.group !19
28749 // CHECK30-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
28750 // CHECK30-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
28751 // CHECK30-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
28752 // CHECK30-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
28753 // CHECK30-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !19
28754 // CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28755 // CHECK30:       omp.body.continue:
28756 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28757 // CHECK30:       omp.inner.for.inc:
28758 // CHECK30-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
28759 // CHECK30-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP14]], 1
28760 // CHECK30-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
28761 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
28762 // CHECK30:       omp.inner.for.end:
28763 // CHECK30-NEXT:    br label [[OMP_IF_END:%.*]]
28764 // CHECK30:       omp_if.else:
28765 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
28766 // CHECK30:       omp.inner.for.cond9:
28767 // CHECK30-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
28768 // CHECK30-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
28769 // CHECK30-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
28770 // CHECK30-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END25:%.*]]
28771 // CHECK30:       omp.inner.for.body11:
28772 // CHECK30-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
28773 // CHECK30-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP17]], 1
28774 // CHECK30-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
28775 // CHECK30-NEXT:    store i32 [[ADD13]], i32* [[I]], align 4
28776 // CHECK30-NEXT:    [[TMP18:%.*]] = load i32, i32* [[B]], align 4
28777 // CHECK30-NEXT:    [[CONV14:%.*]] = sitofp i32 [[TMP18]] to double
28778 // CHECK30-NEXT:    [[ADD15:%.*]] = fadd double [[CONV14]], 1.500000e+00
28779 // CHECK30-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
28780 // CHECK30-NEXT:    store double [[ADD15]], double* [[A16]], align 8
28781 // CHECK30-NEXT:    [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
28782 // CHECK30-NEXT:    [[TMP19:%.*]] = load double, double* [[A17]], align 8
28783 // CHECK30-NEXT:    [[INC18:%.*]] = fadd double [[TMP19]], 1.000000e+00
28784 // CHECK30-NEXT:    store double [[INC18]], double* [[A17]], align 8
28785 // CHECK30-NEXT:    [[CONV19:%.*]] = fptosi double [[INC18]] to i16
28786 // CHECK30-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP2]]
28787 // CHECK30-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP20]]
28788 // CHECK30-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX20]], i64 1
28789 // CHECK30-NEXT:    store i16 [[CONV19]], i16* [[ARRAYIDX21]], align 2
28790 // CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE22:%.*]]
28791 // CHECK30:       omp.body.continue22:
28792 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC23:%.*]]
28793 // CHECK30:       omp.inner.for.inc23:
28794 // CHECK30-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
28795 // CHECK30-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP21]], 1
28796 // CHECK30-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4
28797 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP22:![0-9]+]]
28798 // CHECK30:       omp.inner.for.end25:
28799 // CHECK30-NEXT:    br label [[OMP_IF_END]]
28800 // CHECK30:       omp_if.end:
28801 // CHECK30-NEXT:    store i32 10, i32* [[I]], align 4
28802 // CHECK30-NEXT:    [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]]
28803 // CHECK30-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP22]]
28804 // CHECK30-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i64 1
28805 // CHECK30-NEXT:    [[TMP23:%.*]] = load i16, i16* [[ARRAYIDX27]], align 2
28806 // CHECK30-NEXT:    [[CONV28:%.*]] = sext i16 [[TMP23]] to i32
28807 // CHECK30-NEXT:    [[TMP24:%.*]] = load i32, i32* [[B]], align 4
28808 // CHECK30-NEXT:    [[ADD29:%.*]] = add nsw i32 [[CONV28]], [[TMP24]]
28809 // CHECK30-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
28810 // CHECK30-NEXT:    call void @llvm.stackrestore(i8* [[TMP25]])
28811 // CHECK30-NEXT:    ret i32 [[ADD29]]
28812 //
28813 //
28814 // CHECK30-LABEL: define {{[^@]+}}@_ZL7fstatici
28815 // CHECK30-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
28816 // CHECK30-NEXT:  entry:
28817 // CHECK30-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28818 // CHECK30-NEXT:    [[A:%.*]] = alloca i32, align 4
28819 // CHECK30-NEXT:    [[AA:%.*]] = alloca i16, align 2
28820 // CHECK30-NEXT:    [[AAA:%.*]] = alloca i8, align 1
28821 // CHECK30-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
28822 // CHECK30-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28823 // CHECK30-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
28824 // CHECK30-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
28825 // CHECK30-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
28826 // CHECK30-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28827 // CHECK30-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28828 // CHECK30-NEXT:    [[I:%.*]] = alloca i32, align 4
28829 // CHECK30-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28830 // CHECK30-NEXT:    [[I5:%.*]] = alloca i32, align 4
28831 // CHECK30-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28832 // CHECK30-NEXT:    store i32 0, i32* [[A]], align 4
28833 // CHECK30-NEXT:    store i16 0, i16* [[AA]], align 2
28834 // CHECK30-NEXT:    store i8 0, i8* [[AAA]], align 1
28835 // CHECK30-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
28836 // CHECK30-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
28837 // CHECK30-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
28838 // CHECK30-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
28839 // CHECK30-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28840 // CHECK30-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28841 // CHECK30-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
28842 // CHECK30-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
28843 // CHECK30-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
28844 // CHECK30-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
28845 // CHECK30-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
28846 // CHECK30-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
28847 // CHECK30-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28848 // CHECK30-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
28849 // CHECK30-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
28850 // CHECK30-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28851 // CHECK30-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
28852 // CHECK30-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28853 // CHECK30-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28854 // CHECK30-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
28855 // CHECK30-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
28856 // CHECK30:       simd.if.then:
28857 // CHECK30-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28858 // CHECK30-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
28859 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28860 // CHECK30:       omp.inner.for.cond:
28861 // CHECK30-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28862 // CHECK30-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
28863 // CHECK30-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
28864 // CHECK30-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
28865 // CHECK30-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28866 // CHECK30:       omp.inner.for.body:
28867 // CHECK30-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !24
28868 // CHECK30-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28869 // CHECK30-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
28870 // CHECK30-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
28871 // CHECK30-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !24
28872 // CHECK30-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24
28873 // CHECK30-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
28874 // CHECK30-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !24
28875 // CHECK30-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24
28876 // CHECK30-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
28877 // CHECK30-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
28878 // CHECK30-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
28879 // CHECK30-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !24
28880 // CHECK30-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !24
28881 // CHECK30-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
28882 // CHECK30-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
28883 // CHECK30-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
28884 // CHECK30-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !24
28885 // CHECK30-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
28886 // CHECK30-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
28887 // CHECK30-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
28888 // CHECK30-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
28889 // CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28890 // CHECK30:       omp.body.continue:
28891 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28892 // CHECK30:       omp.inner.for.inc:
28893 // CHECK30-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28894 // CHECK30-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
28895 // CHECK30-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
28896 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
28897 // CHECK30:       omp.inner.for.end:
28898 // CHECK30-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28899 // CHECK30-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
28900 // CHECK30-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
28901 // CHECK30-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
28902 // CHECK30-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
28903 // CHECK30-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
28904 // CHECK30-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
28905 // CHECK30-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
28906 // CHECK30-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
28907 // CHECK30-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
28908 // CHECK30-NEXT:    br label [[SIMD_IF_END]]
28909 // CHECK30:       simd.if.end:
28910 // CHECK30-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
28911 // CHECK30-NEXT:    ret i32 [[TMP21]]
28912 //
28913 //
28914 // CHECK30-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
28915 // CHECK30-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
28916 // CHECK30-NEXT:  entry:
28917 // CHECK30-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28918 // CHECK30-NEXT:    [[A:%.*]] = alloca i32, align 4
28919 // CHECK30-NEXT:    [[AA:%.*]] = alloca i16, align 2
28920 // CHECK30-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
28921 // CHECK30-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28922 // CHECK30-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28923 // CHECK30-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28924 // CHECK30-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28925 // CHECK30-NEXT:    [[I:%.*]] = alloca i32, align 4
28926 // CHECK30-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
28927 // CHECK30-NEXT:    store i32 0, i32* [[A]], align 4
28928 // CHECK30-NEXT:    store i16 0, i16* [[AA]], align 2
28929 // CHECK30-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
28930 // CHECK30-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
28931 // CHECK30-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
28932 // CHECK30-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
28933 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
28934 // CHECK30:       omp.inner.for.cond:
28935 // CHECK30-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
28936 // CHECK30-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
28937 // CHECK30-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
28938 // CHECK30-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
28939 // CHECK30:       omp.inner.for.body:
28940 // CHECK30-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
28941 // CHECK30-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
28942 // CHECK30-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
28943 // CHECK30-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !27
28944 // CHECK30-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !27
28945 // CHECK30-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
28946 // CHECK30-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !27
28947 // CHECK30-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !27
28948 // CHECK30-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
28949 // CHECK30-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
28950 // CHECK30-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
28951 // CHECK30-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !27
28952 // CHECK30-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
28953 // CHECK30-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
28954 // CHECK30-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
28955 // CHECK30-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !27
28956 // CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
28957 // CHECK30:       omp.body.continue:
28958 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
28959 // CHECK30:       omp.inner.for.inc:
28960 // CHECK30-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
28961 // CHECK30-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
28962 // CHECK30-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
28963 // CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
28964 // CHECK30:       omp.inner.for.end:
28965 // CHECK30-NEXT:    store i32 10, i32* [[I]], align 4
28966 // CHECK30-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
28967 // CHECK30-NEXT:    ret i32 [[TMP8]]
28968 //
28969 //
28970 // CHECK31-LABEL: define {{[^@]+}}@_Z3fooi
28971 // CHECK31-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
28972 // CHECK31-NEXT:  entry:
28973 // CHECK31-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
28974 // CHECK31-NEXT:    [[A:%.*]] = alloca i32, align 4
28975 // CHECK31-NEXT:    [[AA:%.*]] = alloca i16, align 2
28976 // CHECK31-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
28977 // CHECK31-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
28978 // CHECK31-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
28979 // CHECK31-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
28980 // CHECK31-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
28981 // CHECK31-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
28982 // CHECK31-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
28983 // CHECK31-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
28984 // CHECK31-NEXT:    [[TMP:%.*]] = alloca i32, align 4
28985 // CHECK31-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
28986 // CHECK31-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
28987 // CHECK31-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
28988 // CHECK31-NEXT:    [[I:%.*]] = alloca i32, align 4
28989 // CHECK31-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
28990 // CHECK31-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
28991 // CHECK31-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
28992 // CHECK31-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
28993 // CHECK31-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
28994 // CHECK31-NEXT:    [[A8:%.*]] = alloca i32, align 4
28995 // CHECK31-NEXT:    [[A9:%.*]] = alloca i32, align 4
28996 // CHECK31-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
28997 // CHECK31-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
28998 // CHECK31-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
28999 // CHECK31-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
29000 // CHECK31-NEXT:    [[I24:%.*]] = alloca i32, align 4
29001 // CHECK31-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
29002 // CHECK31-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
29003 // CHECK31-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
29004 // CHECK31-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
29005 // CHECK31-NEXT:    [[I40:%.*]] = alloca i32, align 4
29006 // CHECK31-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
29007 // CHECK31-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
29008 // CHECK31-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
29009 // CHECK31-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
29010 // CHECK31-NEXT:    [[I58:%.*]] = alloca i32, align 4
29011 // CHECK31-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
29012 // CHECK31-NEXT:    store i32 0, i32* [[A]], align 4
29013 // CHECK31-NEXT:    store i16 0, i16* [[AA]], align 2
29014 // CHECK31-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
29015 // CHECK31-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
29016 // CHECK31-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
29017 // CHECK31-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
29018 // CHECK31-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
29019 // CHECK31-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
29020 // CHECK31-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
29021 // CHECK31-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
29022 // CHECK31-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
29023 // CHECK31-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4
29024 // CHECK31-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
29025 // CHECK31-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
29026 // CHECK31-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_2]], align 4
29027 // CHECK31-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29028 // CHECK31-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
29029 // CHECK31-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29030 // CHECK31-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
29031 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29032 // CHECK31:       omp.inner.for.cond:
29033 // CHECK31-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29034 // CHECK31-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
29035 // CHECK31-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
29036 // CHECK31-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29037 // CHECK31:       omp.inner.for.body:
29038 // CHECK31-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29039 // CHECK31-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
29040 // CHECK31-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
29041 // CHECK31-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
29042 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29043 // CHECK31:       omp.body.continue:
29044 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29045 // CHECK31:       omp.inner.for.inc:
29046 // CHECK31-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29047 // CHECK31-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
29048 // CHECK31-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29049 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
29050 // CHECK31:       omp.inner.for.end:
29051 // CHECK31-NEXT:    store i32 10, i32* [[I]], align 4
29052 // CHECK31-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
29053 // CHECK31-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
29054 // CHECK31-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
29055 // CHECK31-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV7]], align 4
29056 // CHECK31-NEXT:    [[TMP12:%.*]] = load i32, i32* [[A]], align 4
29057 // CHECK31-NEXT:    store i32 [[TMP12]], i32* [[DOTLINEAR_START]], align 4
29058 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
29059 // CHECK31:       omp.inner.for.cond10:
29060 // CHECK31-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
29061 // CHECK31-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
29062 // CHECK31-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
29063 // CHECK31-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
29064 // CHECK31:       omp.inner.for.body12:
29065 // CHECK31-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
29066 // CHECK31-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 1
29067 // CHECK31-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
29068 // CHECK31-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4, !nontemporal !8
29069 // CHECK31-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A8]], align 4, !nontemporal !8
29070 // CHECK31-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
29071 // CHECK31-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4, !nontemporal !8
29072 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
29073 // CHECK31:       omp.body.continue16:
29074 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
29075 // CHECK31:       omp.inner.for.inc17:
29076 // CHECK31-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
29077 // CHECK31-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
29078 // CHECK31-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
29079 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP9:![0-9]+]]
29080 // CHECK31:       omp.inner.for.end19:
29081 // CHECK31-NEXT:    store i32 10, i32* [[A]], align 4
29082 // CHECK31-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
29083 // CHECK31-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
29084 // CHECK31-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
29085 // CHECK31-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV23]], align 4
29086 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
29087 // CHECK31:       omp.inner.for.cond25:
29088 // CHECK31-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
29089 // CHECK31-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !11
29090 // CHECK31-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
29091 // CHECK31-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
29092 // CHECK31:       omp.inner.for.body27:
29093 // CHECK31-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
29094 // CHECK31-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP21]], 1
29095 // CHECK31-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
29096 // CHECK31-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !11
29097 // CHECK31-NEXT:    [[TMP22:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !11
29098 // CHECK31-NEXT:    [[CONV:%.*]] = sext i16 [[TMP22]] to i32
29099 // CHECK31-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
29100 // CHECK31-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
29101 // CHECK31-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !11
29102 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
29103 // CHECK31:       omp.body.continue32:
29104 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
29105 // CHECK31:       omp.inner.for.inc33:
29106 // CHECK31-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
29107 // CHECK31-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP23]], 1
29108 // CHECK31-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
29109 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP12:![0-9]+]]
29110 // CHECK31:       omp.inner.for.end35:
29111 // CHECK31-NEXT:    store i32 10, i32* [[I24]], align 4
29112 // CHECK31-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
29113 // CHECK31-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
29114 // CHECK31-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
29115 // CHECK31-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV39]], align 4
29116 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
29117 // CHECK31:       omp.inner.for.cond41:
29118 // CHECK31-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
29119 // CHECK31-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !14
29120 // CHECK31-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
29121 // CHECK31-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
29122 // CHECK31:       omp.inner.for.body43:
29123 // CHECK31-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
29124 // CHECK31-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP27]], 1
29125 // CHECK31-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
29126 // CHECK31-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !14
29127 // CHECK31-NEXT:    [[TMP28:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !14
29128 // CHECK31-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP28]], 1
29129 // CHECK31-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !14
29130 // CHECK31-NEXT:    [[TMP29:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !14
29131 // CHECK31-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP29]] to i32
29132 // CHECK31-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
29133 // CHECK31-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
29134 // CHECK31-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !14
29135 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
29136 // CHECK31:       omp.body.continue50:
29137 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
29138 // CHECK31:       omp.inner.for.inc51:
29139 // CHECK31-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
29140 // CHECK31-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP30]], 1
29141 // CHECK31-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
29142 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP15:![0-9]+]]
29143 // CHECK31:       omp.inner.for.end53:
29144 // CHECK31-NEXT:    store i32 10, i32* [[I40]], align 4
29145 // CHECK31-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
29146 // CHECK31-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
29147 // CHECK31-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
29148 // CHECK31-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV57]], align 4
29149 // CHECK31-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 0
29150 // CHECK31-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
29151 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
29152 // CHECK31:       omp.inner.for.cond59:
29153 // CHECK31-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
29154 // CHECK31-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !17
29155 // CHECK31-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP32]], [[TMP33]]
29156 // CHECK31-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
29157 // CHECK31:       omp.inner.for.body61:
29158 // CHECK31-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
29159 // CHECK31-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP34]], 1
29160 // CHECK31-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
29161 // CHECK31-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !17
29162 // CHECK31-NEXT:    [[TMP35:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !17
29163 // CHECK31-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP35]], 1
29164 // CHECK31-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !17
29165 // CHECK31-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
29166 // CHECK31-NEXT:    [[TMP36:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !17
29167 // CHECK31-NEXT:    [[CONV65:%.*]] = fpext float [[TMP36]] to double
29168 // CHECK31-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
29169 // CHECK31-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
29170 // CHECK31-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !17
29171 // CHECK31-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
29172 // CHECK31-NEXT:    [[TMP37:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !17
29173 // CHECK31-NEXT:    [[CONV69:%.*]] = fpext float [[TMP37]] to double
29174 // CHECK31-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
29175 // CHECK31-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
29176 // CHECK31-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !17
29177 // CHECK31-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
29178 // CHECK31-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i32 0, i32 2
29179 // CHECK31-NEXT:    [[TMP38:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !17
29180 // CHECK31-NEXT:    [[ADD74:%.*]] = fadd double [[TMP38]], 1.000000e+00
29181 // CHECK31-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !17
29182 // CHECK31-NEXT:    [[TMP39:%.*]] = mul nsw i32 1, [[TMP2]]
29183 // CHECK31-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP39]]
29184 // CHECK31-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i32 3
29185 // CHECK31-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !17
29186 // CHECK31-NEXT:    [[ADD77:%.*]] = fadd double [[TMP40]], 1.000000e+00
29187 // CHECK31-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !17
29188 // CHECK31-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
29189 // CHECK31-NEXT:    [[TMP41:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !17
29190 // CHECK31-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP41]], 1
29191 // CHECK31-NEXT:    store i64 [[ADD78]], i64* [[X]], align 4, !llvm.access.group !17
29192 // CHECK31-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
29193 // CHECK31-NEXT:    [[TMP42:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !17
29194 // CHECK31-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP42]] to i32
29195 // CHECK31-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
29196 // CHECK31-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
29197 // CHECK31-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 4, !llvm.access.group !17
29198 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
29199 // CHECK31:       omp.body.continue82:
29200 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
29201 // CHECK31:       omp.inner.for.inc83:
29202 // CHECK31-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
29203 // CHECK31-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP43]], 1
29204 // CHECK31-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
29205 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP18:![0-9]+]]
29206 // CHECK31:       omp.inner.for.end85:
29207 // CHECK31-NEXT:    store i32 10, i32* [[I58]], align 4
29208 // CHECK31-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
29209 // CHECK31-NEXT:    [[TMP45:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
29210 // CHECK31-NEXT:    call void @llvm.stackrestore(i8* [[TMP45]])
29211 // CHECK31-NEXT:    ret i32 [[TMP44]]
29212 //
29213 //
29214 // CHECK31-LABEL: define {{[^@]+}}@_Z3bari
29215 // CHECK31-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
29216 // CHECK31-NEXT:  entry:
29217 // CHECK31-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
29218 // CHECK31-NEXT:    [[A:%.*]] = alloca i32, align 4
29219 // CHECK31-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
29220 // CHECK31-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
29221 // CHECK31-NEXT:    store i32 0, i32* [[A]], align 4
29222 // CHECK31-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
29223 // CHECK31-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
29224 // CHECK31-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
29225 // CHECK31-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
29226 // CHECK31-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
29227 // CHECK31-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
29228 // CHECK31-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef [[TMP2]])
29229 // CHECK31-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
29230 // CHECK31-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
29231 // CHECK31-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
29232 // CHECK31-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
29233 // CHECK31-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
29234 // CHECK31-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
29235 // CHECK31-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
29236 // CHECK31-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
29237 // CHECK31-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
29238 // CHECK31-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
29239 // CHECK31-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
29240 // CHECK31-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
29241 // CHECK31-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
29242 // CHECK31-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
29243 // CHECK31-NEXT:    ret i32 [[TMP8]]
29244 //
29245 //
29246 // CHECK31-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
29247 // CHECK31-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
29248 // CHECK31-NEXT:  entry:
29249 // CHECK31-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
29250 // CHECK31-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
29251 // CHECK31-NEXT:    [[B:%.*]] = alloca i32, align 4
29252 // CHECK31-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
29253 // CHECK31-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
29254 // CHECK31-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
29255 // CHECK31-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29256 // CHECK31-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29257 // CHECK31-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29258 // CHECK31-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29259 // CHECK31-NEXT:    [[I:%.*]] = alloca i32, align 4
29260 // CHECK31-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
29261 // CHECK31-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
29262 // CHECK31-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
29263 // CHECK31-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
29264 // CHECK31-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
29265 // CHECK31-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
29266 // CHECK31-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
29267 // CHECK31-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
29268 // CHECK31-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
29269 // CHECK31-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
29270 // CHECK31-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
29271 // CHECK31-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
29272 // CHECK31-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
29273 // CHECK31-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60
29274 // CHECK31-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
29275 // CHECK31-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
29276 // CHECK31-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29277 // CHECK31-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
29278 // CHECK31-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29279 // CHECK31-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
29280 // CHECK31-NEXT:    [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
29281 // CHECK31-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
29282 // CHECK31-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
29283 // CHECK31:       omp_if.then:
29284 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29285 // CHECK31:       omp.inner.for.cond:
29286 // CHECK31-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
29287 // CHECK31-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20
29288 // CHECK31-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
29289 // CHECK31-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29290 // CHECK31:       omp.inner.for.body:
29291 // CHECK31-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
29292 // CHECK31-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
29293 // CHECK31-NEXT:    [[ADD3:%.*]] = add nsw i32 0, [[MUL]]
29294 // CHECK31-NEXT:    store i32 [[ADD3]], i32* [[I]], align 4, !llvm.access.group !20
29295 // CHECK31-NEXT:    [[TMP10:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !20
29296 // CHECK31-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP10]] to double
29297 // CHECK31-NEXT:    [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00
29298 // CHECK31-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
29299 // CHECK31-NEXT:    store double [[ADD4]], double* [[A]], align 4, !llvm.access.group !20
29300 // CHECK31-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
29301 // CHECK31-NEXT:    [[TMP11:%.*]] = load double, double* [[A5]], align 4, !llvm.access.group !20
29302 // CHECK31-NEXT:    [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00
29303 // CHECK31-NEXT:    store double [[INC]], double* [[A5]], align 4, !llvm.access.group !20
29304 // CHECK31-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
29305 // CHECK31-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
29306 // CHECK31-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
29307 // CHECK31-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
29308 // CHECK31-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !20
29309 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29310 // CHECK31:       omp.body.continue:
29311 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29312 // CHECK31:       omp.inner.for.inc:
29313 // CHECK31-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
29314 // CHECK31-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1
29315 // CHECK31-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
29316 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
29317 // CHECK31:       omp.inner.for.end:
29318 // CHECK31-NEXT:    br label [[OMP_IF_END:%.*]]
29319 // CHECK31:       omp_if.else:
29320 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
29321 // CHECK31:       omp.inner.for.cond9:
29322 // CHECK31-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
29323 // CHECK31-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
29324 // CHECK31-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
29325 // CHECK31-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END25:%.*]]
29326 // CHECK31:       omp.inner.for.body11:
29327 // CHECK31-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
29328 // CHECK31-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP16]], 1
29329 // CHECK31-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
29330 // CHECK31-NEXT:    store i32 [[ADD13]], i32* [[I]], align 4
29331 // CHECK31-NEXT:    [[TMP17:%.*]] = load i32, i32* [[B]], align 4
29332 // CHECK31-NEXT:    [[CONV14:%.*]] = sitofp i32 [[TMP17]] to double
29333 // CHECK31-NEXT:    [[ADD15:%.*]] = fadd double [[CONV14]], 1.500000e+00
29334 // CHECK31-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
29335 // CHECK31-NEXT:    store double [[ADD15]], double* [[A16]], align 4
29336 // CHECK31-NEXT:    [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
29337 // CHECK31-NEXT:    [[TMP18:%.*]] = load double, double* [[A17]], align 4
29338 // CHECK31-NEXT:    [[INC18:%.*]] = fadd double [[TMP18]], 1.000000e+00
29339 // CHECK31-NEXT:    store double [[INC18]], double* [[A17]], align 4
29340 // CHECK31-NEXT:    [[CONV19:%.*]] = fptosi double [[INC18]] to i16
29341 // CHECK31-NEXT:    [[TMP19:%.*]] = mul nsw i32 1, [[TMP1]]
29342 // CHECK31-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP19]]
29343 // CHECK31-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX20]], i32 1
29344 // CHECK31-NEXT:    store i16 [[CONV19]], i16* [[ARRAYIDX21]], align 2
29345 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE22:%.*]]
29346 // CHECK31:       omp.body.continue22:
29347 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC23:%.*]]
29348 // CHECK31:       omp.inner.for.inc23:
29349 // CHECK31-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
29350 // CHECK31-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP20]], 1
29351 // CHECK31-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4
29352 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP23:![0-9]+]]
29353 // CHECK31:       omp.inner.for.end25:
29354 // CHECK31-NEXT:    br label [[OMP_IF_END]]
29355 // CHECK31:       omp_if.end:
29356 // CHECK31-NEXT:    store i32 10, i32* [[I]], align 4
29357 // CHECK31-NEXT:    [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]]
29358 // CHECK31-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP21]]
29359 // CHECK31-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i32 1
29360 // CHECK31-NEXT:    [[TMP22:%.*]] = load i16, i16* [[ARRAYIDX27]], align 2
29361 // CHECK31-NEXT:    [[CONV28:%.*]] = sext i16 [[TMP22]] to i32
29362 // CHECK31-NEXT:    [[TMP23:%.*]] = load i32, i32* [[B]], align 4
29363 // CHECK31-NEXT:    [[ADD29:%.*]] = add nsw i32 [[CONV28]], [[TMP23]]
29364 // CHECK31-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
29365 // CHECK31-NEXT:    call void @llvm.stackrestore(i8* [[TMP24]])
29366 // CHECK31-NEXT:    ret i32 [[ADD29]]
29367 //
29368 //
29369 // CHECK31-LABEL: define {{[^@]+}}@_ZL7fstatici
29370 // CHECK31-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
29371 // CHECK31-NEXT:  entry:
29372 // CHECK31-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
29373 // CHECK31-NEXT:    [[A:%.*]] = alloca i32, align 4
29374 // CHECK31-NEXT:    [[AA:%.*]] = alloca i16, align 2
29375 // CHECK31-NEXT:    [[AAA:%.*]] = alloca i8, align 1
29376 // CHECK31-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
29377 // CHECK31-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29378 // CHECK31-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
29379 // CHECK31-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
29380 // CHECK31-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
29381 // CHECK31-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29382 // CHECK31-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29383 // CHECK31-NEXT:    [[I:%.*]] = alloca i32, align 4
29384 // CHECK31-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29385 // CHECK31-NEXT:    [[I5:%.*]] = alloca i32, align 4
29386 // CHECK31-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
29387 // CHECK31-NEXT:    store i32 0, i32* [[A]], align 4
29388 // CHECK31-NEXT:    store i16 0, i16* [[AA]], align 2
29389 // CHECK31-NEXT:    store i8 0, i8* [[AAA]], align 1
29390 // CHECK31-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
29391 // CHECK31-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
29392 // CHECK31-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
29393 // CHECK31-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
29394 // CHECK31-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
29395 // CHECK31-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29396 // CHECK31-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
29397 // CHECK31-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
29398 // CHECK31-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
29399 // CHECK31-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
29400 // CHECK31-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
29401 // CHECK31-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
29402 // CHECK31-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29403 // CHECK31-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
29404 // CHECK31-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
29405 // CHECK31-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29406 // CHECK31-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
29407 // CHECK31-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29408 // CHECK31-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
29409 // CHECK31-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
29410 // CHECK31-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
29411 // CHECK31:       simd.if.then:
29412 // CHECK31-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29413 // CHECK31-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
29414 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29415 // CHECK31:       omp.inner.for.cond:
29416 // CHECK31-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
29417 // CHECK31-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
29418 // CHECK31-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
29419 // CHECK31-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
29420 // CHECK31-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29421 // CHECK31:       omp.inner.for.body:
29422 // CHECK31-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !25
29423 // CHECK31-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
29424 // CHECK31-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
29425 // CHECK31-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
29426 // CHECK31-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !25
29427 // CHECK31-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25
29428 // CHECK31-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
29429 // CHECK31-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !25
29430 // CHECK31-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25
29431 // CHECK31-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
29432 // CHECK31-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
29433 // CHECK31-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
29434 // CHECK31-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !25
29435 // CHECK31-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !25
29436 // CHECK31-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
29437 // CHECK31-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
29438 // CHECK31-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
29439 // CHECK31-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !25
29440 // CHECK31-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
29441 // CHECK31-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
29442 // CHECK31-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
29443 // CHECK31-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
29444 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29445 // CHECK31:       omp.body.continue:
29446 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29447 // CHECK31:       omp.inner.for.inc:
29448 // CHECK31-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
29449 // CHECK31-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
29450 // CHECK31-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
29451 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
29452 // CHECK31:       omp.inner.for.end:
29453 // CHECK31-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29454 // CHECK31-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
29455 // CHECK31-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29456 // CHECK31-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
29457 // CHECK31-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
29458 // CHECK31-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
29459 // CHECK31-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
29460 // CHECK31-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
29461 // CHECK31-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
29462 // CHECK31-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
29463 // CHECK31-NEXT:    br label [[SIMD_IF_END]]
29464 // CHECK31:       simd.if.end:
29465 // CHECK31-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
29466 // CHECK31-NEXT:    ret i32 [[TMP21]]
29467 //
29468 //
29469 // CHECK31-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
29470 // CHECK31-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
29471 // CHECK31-NEXT:  entry:
29472 // CHECK31-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
29473 // CHECK31-NEXT:    [[A:%.*]] = alloca i32, align 4
29474 // CHECK31-NEXT:    [[AA:%.*]] = alloca i16, align 2
29475 // CHECK31-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
29476 // CHECK31-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29477 // CHECK31-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29478 // CHECK31-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29479 // CHECK31-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29480 // CHECK31-NEXT:    [[I:%.*]] = alloca i32, align 4
29481 // CHECK31-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
29482 // CHECK31-NEXT:    store i32 0, i32* [[A]], align 4
29483 // CHECK31-NEXT:    store i16 0, i16* [[AA]], align 2
29484 // CHECK31-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29485 // CHECK31-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
29486 // CHECK31-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29487 // CHECK31-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
29488 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29489 // CHECK31:       omp.inner.for.cond:
29490 // CHECK31-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
29491 // CHECK31-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28
29492 // CHECK31-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
29493 // CHECK31-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29494 // CHECK31:       omp.inner.for.body:
29495 // CHECK31-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
29496 // CHECK31-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
29497 // CHECK31-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
29498 // CHECK31-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !28
29499 // CHECK31-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !28
29500 // CHECK31-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
29501 // CHECK31-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !28
29502 // CHECK31-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !28
29503 // CHECK31-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
29504 // CHECK31-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
29505 // CHECK31-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
29506 // CHECK31-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !28
29507 // CHECK31-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
29508 // CHECK31-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
29509 // CHECK31-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
29510 // CHECK31-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
29511 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29512 // CHECK31:       omp.body.continue:
29513 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29514 // CHECK31:       omp.inner.for.inc:
29515 // CHECK31-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
29516 // CHECK31-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
29517 // CHECK31-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
29518 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
29519 // CHECK31:       omp.inner.for.end:
29520 // CHECK31-NEXT:    store i32 10, i32* [[I]], align 4
29521 // CHECK31-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
29522 // CHECK31-NEXT:    ret i32 [[TMP8]]
29523 //
29524 //
29525 // CHECK32-LABEL: define {{[^@]+}}@_Z3fooi
29526 // CHECK32-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
29527 // CHECK32-NEXT:  entry:
29528 // CHECK32-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
29529 // CHECK32-NEXT:    [[A:%.*]] = alloca i32, align 4
29530 // CHECK32-NEXT:    [[AA:%.*]] = alloca i16, align 2
29531 // CHECK32-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
29532 // CHECK32-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
29533 // CHECK32-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
29534 // CHECK32-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
29535 // CHECK32-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
29536 // CHECK32-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
29537 // CHECK32-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
29538 // CHECK32-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
29539 // CHECK32-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29540 // CHECK32-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29541 // CHECK32-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29542 // CHECK32-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29543 // CHECK32-NEXT:    [[I:%.*]] = alloca i32, align 4
29544 // CHECK32-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
29545 // CHECK32-NEXT:    [[DOTOMP_LB5:%.*]] = alloca i32, align 4
29546 // CHECK32-NEXT:    [[DOTOMP_UB6:%.*]] = alloca i32, align 4
29547 // CHECK32-NEXT:    [[DOTOMP_IV7:%.*]] = alloca i32, align 4
29548 // CHECK32-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
29549 // CHECK32-NEXT:    [[A8:%.*]] = alloca i32, align 4
29550 // CHECK32-NEXT:    [[A9:%.*]] = alloca i32, align 4
29551 // CHECK32-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
29552 // CHECK32-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
29553 // CHECK32-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
29554 // CHECK32-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i32, align 4
29555 // CHECK32-NEXT:    [[I24:%.*]] = alloca i32, align 4
29556 // CHECK32-NEXT:    [[_TMP36:%.*]] = alloca i32, align 4
29557 // CHECK32-NEXT:    [[DOTOMP_LB37:%.*]] = alloca i32, align 4
29558 // CHECK32-NEXT:    [[DOTOMP_UB38:%.*]] = alloca i32, align 4
29559 // CHECK32-NEXT:    [[DOTOMP_IV39:%.*]] = alloca i32, align 4
29560 // CHECK32-NEXT:    [[I40:%.*]] = alloca i32, align 4
29561 // CHECK32-NEXT:    [[_TMP54:%.*]] = alloca i32, align 4
29562 // CHECK32-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
29563 // CHECK32-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
29564 // CHECK32-NEXT:    [[DOTOMP_IV57:%.*]] = alloca i32, align 4
29565 // CHECK32-NEXT:    [[I58:%.*]] = alloca i32, align 4
29566 // CHECK32-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
29567 // CHECK32-NEXT:    store i32 0, i32* [[A]], align 4
29568 // CHECK32-NEXT:    store i16 0, i16* [[AA]], align 2
29569 // CHECK32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
29570 // CHECK32-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
29571 // CHECK32-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
29572 // CHECK32-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
29573 // CHECK32-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
29574 // CHECK32-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
29575 // CHECK32-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
29576 // CHECK32-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
29577 // CHECK32-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
29578 // CHECK32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4
29579 // CHECK32-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
29580 // CHECK32-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
29581 // CHECK32-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_2]], align 4
29582 // CHECK32-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29583 // CHECK32-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
29584 // CHECK32-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29585 // CHECK32-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
29586 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29587 // CHECK32:       omp.inner.for.cond:
29588 // CHECK32-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29589 // CHECK32-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
29590 // CHECK32-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
29591 // CHECK32-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29592 // CHECK32:       omp.inner.for.body:
29593 // CHECK32-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29594 // CHECK32-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
29595 // CHECK32-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
29596 // CHECK32-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
29597 // CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29598 // CHECK32:       omp.body.continue:
29599 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29600 // CHECK32:       omp.inner.for.inc:
29601 // CHECK32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29602 // CHECK32-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
29603 // CHECK32-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
29604 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
29605 // CHECK32:       omp.inner.for.end:
29606 // CHECK32-NEXT:    store i32 10, i32* [[I]], align 4
29607 // CHECK32-NEXT:    store i32 0, i32* [[DOTOMP_LB5]], align 4
29608 // CHECK32-NEXT:    store i32 9, i32* [[DOTOMP_UB6]], align 4
29609 // CHECK32-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB5]], align 4
29610 // CHECK32-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV7]], align 4
29611 // CHECK32-NEXT:    [[TMP12:%.*]] = load i32, i32* [[A]], align 4
29612 // CHECK32-NEXT:    store i32 [[TMP12]], i32* [[DOTLINEAR_START]], align 4
29613 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND10:%.*]]
29614 // CHECK32:       omp.inner.for.cond10:
29615 // CHECK32-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
29616 // CHECK32-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB6]], align 4
29617 // CHECK32-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
29618 // CHECK32-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
29619 // CHECK32:       omp.inner.for.body12:
29620 // CHECK32-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
29621 // CHECK32-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 1
29622 // CHECK32-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
29623 // CHECK32-NEXT:    store i32 [[ADD14]], i32* [[A8]], align 4, !nontemporal !8
29624 // CHECK32-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A8]], align 4, !nontemporal !8
29625 // CHECK32-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
29626 // CHECK32-NEXT:    store i32 [[ADD15]], i32* [[A8]], align 4, !nontemporal !8
29627 // CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
29628 // CHECK32:       omp.body.continue16:
29629 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
29630 // CHECK32:       omp.inner.for.inc17:
29631 // CHECK32-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV7]], align 4
29632 // CHECK32-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
29633 // CHECK32-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV7]], align 4
29634 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP9:![0-9]+]]
29635 // CHECK32:       omp.inner.for.end19:
29636 // CHECK32-NEXT:    store i32 10, i32* [[A]], align 4
29637 // CHECK32-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
29638 // CHECK32-NEXT:    store i32 9, i32* [[DOTOMP_UB22]], align 4
29639 // CHECK32-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
29640 // CHECK32-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV23]], align 4
29641 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND25:%.*]]
29642 // CHECK32:       omp.inner.for.cond25:
29643 // CHECK32-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
29644 // CHECK32-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !11
29645 // CHECK32-NEXT:    [[CMP26:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
29646 // CHECK32-NEXT:    br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]]
29647 // CHECK32:       omp.inner.for.body27:
29648 // CHECK32-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
29649 // CHECK32-NEXT:    [[MUL28:%.*]] = mul nsw i32 [[TMP21]], 1
29650 // CHECK32-NEXT:    [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
29651 // CHECK32-NEXT:    store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !11
29652 // CHECK32-NEXT:    [[TMP22:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !11
29653 // CHECK32-NEXT:    [[CONV:%.*]] = sext i16 [[TMP22]] to i32
29654 // CHECK32-NEXT:    [[ADD30:%.*]] = add nsw i32 [[CONV]], 1
29655 // CHECK32-NEXT:    [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16
29656 // CHECK32-NEXT:    store i16 [[CONV31]], i16* [[AA]], align 2, !llvm.access.group !11
29657 // CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE32:%.*]]
29658 // CHECK32:       omp.body.continue32:
29659 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC33:%.*]]
29660 // CHECK32:       omp.inner.for.inc33:
29661 // CHECK32-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
29662 // CHECK32-NEXT:    [[ADD34:%.*]] = add nsw i32 [[TMP23]], 1
29663 // CHECK32-NEXT:    store i32 [[ADD34]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !11
29664 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP12:![0-9]+]]
29665 // CHECK32:       omp.inner.for.end35:
29666 // CHECK32-NEXT:    store i32 10, i32* [[I24]], align 4
29667 // CHECK32-NEXT:    store i32 0, i32* [[DOTOMP_LB37]], align 4
29668 // CHECK32-NEXT:    store i32 9, i32* [[DOTOMP_UB38]], align 4
29669 // CHECK32-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_LB37]], align 4
29670 // CHECK32-NEXT:    store i32 [[TMP24]], i32* [[DOTOMP_IV39]], align 4
29671 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND41:%.*]]
29672 // CHECK32:       omp.inner.for.cond41:
29673 // CHECK32-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
29674 // CHECK32-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_UB38]], align 4, !llvm.access.group !14
29675 // CHECK32-NEXT:    [[CMP42:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
29676 // CHECK32-NEXT:    br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
29677 // CHECK32:       omp.inner.for.body43:
29678 // CHECK32-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
29679 // CHECK32-NEXT:    [[MUL44:%.*]] = mul nsw i32 [[TMP27]], 1
29680 // CHECK32-NEXT:    [[ADD45:%.*]] = add nsw i32 0, [[MUL44]]
29681 // CHECK32-NEXT:    store i32 [[ADD45]], i32* [[I40]], align 4, !llvm.access.group !14
29682 // CHECK32-NEXT:    [[TMP28:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !14
29683 // CHECK32-NEXT:    [[ADD46:%.*]] = add nsw i32 [[TMP28]], 1
29684 // CHECK32-NEXT:    store i32 [[ADD46]], i32* [[A]], align 4, !llvm.access.group !14
29685 // CHECK32-NEXT:    [[TMP29:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !14
29686 // CHECK32-NEXT:    [[CONV47:%.*]] = sext i16 [[TMP29]] to i32
29687 // CHECK32-NEXT:    [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1
29688 // CHECK32-NEXT:    [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16
29689 // CHECK32-NEXT:    store i16 [[CONV49]], i16* [[AA]], align 2, !llvm.access.group !14
29690 // CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE50:%.*]]
29691 // CHECK32:       omp.body.continue50:
29692 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC51:%.*]]
29693 // CHECK32:       omp.inner.for.inc51:
29694 // CHECK32-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
29695 // CHECK32-NEXT:    [[ADD52:%.*]] = add nsw i32 [[TMP30]], 1
29696 // CHECK32-NEXT:    store i32 [[ADD52]], i32* [[DOTOMP_IV39]], align 4, !llvm.access.group !14
29697 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP15:![0-9]+]]
29698 // CHECK32:       omp.inner.for.end53:
29699 // CHECK32-NEXT:    store i32 10, i32* [[I40]], align 4
29700 // CHECK32-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
29701 // CHECK32-NEXT:    store i32 9, i32* [[DOTOMP_UB56]], align 4
29702 // CHECK32-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
29703 // CHECK32-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV57]], align 4
29704 // CHECK32-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 0
29705 // CHECK32-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[ARRAYDECAY]], i32 16) ]
29706 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND59:%.*]]
29707 // CHECK32:       omp.inner.for.cond59:
29708 // CHECK32-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
29709 // CHECK32-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !17
29710 // CHECK32-NEXT:    [[CMP60:%.*]] = icmp sle i32 [[TMP32]], [[TMP33]]
29711 // CHECK32-NEXT:    br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]]
29712 // CHECK32:       omp.inner.for.body61:
29713 // CHECK32-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
29714 // CHECK32-NEXT:    [[MUL62:%.*]] = mul nsw i32 [[TMP34]], 1
29715 // CHECK32-NEXT:    [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
29716 // CHECK32-NEXT:    store i32 [[ADD63]], i32* [[I58]], align 4, !llvm.access.group !17
29717 // CHECK32-NEXT:    [[TMP35:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !17
29718 // CHECK32-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP35]], 1
29719 // CHECK32-NEXT:    store i32 [[ADD64]], i32* [[A]], align 4, !llvm.access.group !17
29720 // CHECK32-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
29721 // CHECK32-NEXT:    [[TMP36:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !17
29722 // CHECK32-NEXT:    [[CONV65:%.*]] = fpext float [[TMP36]] to double
29723 // CHECK32-NEXT:    [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00
29724 // CHECK32-NEXT:    [[CONV67:%.*]] = fptrunc double [[ADD66]] to float
29725 // CHECK32-NEXT:    store float [[CONV67]], float* [[ARRAYIDX]], align 4, !llvm.access.group !17
29726 // CHECK32-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
29727 // CHECK32-NEXT:    [[TMP37:%.*]] = load float, float* [[ARRAYIDX68]], align 4, !llvm.access.group !17
29728 // CHECK32-NEXT:    [[CONV69:%.*]] = fpext float [[TMP37]] to double
29729 // CHECK32-NEXT:    [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00
29730 // CHECK32-NEXT:    [[CONV71:%.*]] = fptrunc double [[ADD70]] to float
29731 // CHECK32-NEXT:    store float [[CONV71]], float* [[ARRAYIDX68]], align 4, !llvm.access.group !17
29732 // CHECK32-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
29733 // CHECK32-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX72]], i32 0, i32 2
29734 // CHECK32-NEXT:    [[TMP38:%.*]] = load double, double* [[ARRAYIDX73]], align 8, !llvm.access.group !17
29735 // CHECK32-NEXT:    [[ADD74:%.*]] = fadd double [[TMP38]], 1.000000e+00
29736 // CHECK32-NEXT:    store double [[ADD74]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !17
29737 // CHECK32-NEXT:    [[TMP39:%.*]] = mul nsw i32 1, [[TMP2]]
29738 // CHECK32-NEXT:    [[ARRAYIDX75:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP39]]
29739 // CHECK32-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX75]], i32 3
29740 // CHECK32-NEXT:    [[TMP40:%.*]] = load double, double* [[ARRAYIDX76]], align 8, !llvm.access.group !17
29741 // CHECK32-NEXT:    [[ADD77:%.*]] = fadd double [[TMP40]], 1.000000e+00
29742 // CHECK32-NEXT:    store double [[ADD77]], double* [[ARRAYIDX76]], align 8, !llvm.access.group !17
29743 // CHECK32-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
29744 // CHECK32-NEXT:    [[TMP41:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !17
29745 // CHECK32-NEXT:    [[ADD78:%.*]] = add nsw i64 [[TMP41]], 1
29746 // CHECK32-NEXT:    store i64 [[ADD78]], i64* [[X]], align 4, !llvm.access.group !17
29747 // CHECK32-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
29748 // CHECK32-NEXT:    [[TMP42:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !17
29749 // CHECK32-NEXT:    [[CONV79:%.*]] = sext i8 [[TMP42]] to i32
29750 // CHECK32-NEXT:    [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1
29751 // CHECK32-NEXT:    [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8
29752 // CHECK32-NEXT:    store i8 [[CONV81]], i8* [[Y]], align 4, !llvm.access.group !17
29753 // CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE82:%.*]]
29754 // CHECK32:       omp.body.continue82:
29755 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC83:%.*]]
29756 // CHECK32:       omp.inner.for.inc83:
29757 // CHECK32-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
29758 // CHECK32-NEXT:    [[ADD84:%.*]] = add nsw i32 [[TMP43]], 1
29759 // CHECK32-NEXT:    store i32 [[ADD84]], i32* [[DOTOMP_IV57]], align 4, !llvm.access.group !17
29760 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP18:![0-9]+]]
29761 // CHECK32:       omp.inner.for.end85:
29762 // CHECK32-NEXT:    store i32 10, i32* [[I58]], align 4
29763 // CHECK32-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
29764 // CHECK32-NEXT:    [[TMP45:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
29765 // CHECK32-NEXT:    call void @llvm.stackrestore(i8* [[TMP45]])
29766 // CHECK32-NEXT:    ret i32 [[TMP44]]
29767 //
29768 //
29769 // CHECK32-LABEL: define {{[^@]+}}@_Z3bari
29770 // CHECK32-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
29771 // CHECK32-NEXT:  entry:
29772 // CHECK32-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
29773 // CHECK32-NEXT:    [[A:%.*]] = alloca i32, align 4
29774 // CHECK32-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
29775 // CHECK32-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
29776 // CHECK32-NEXT:    store i32 0, i32* [[A]], align 4
29777 // CHECK32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
29778 // CHECK32-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
29779 // CHECK32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
29780 // CHECK32-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
29781 // CHECK32-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
29782 // CHECK32-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
29783 // CHECK32-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef [[S]], i32 noundef [[TMP2]])
29784 // CHECK32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
29785 // CHECK32-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
29786 // CHECK32-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
29787 // CHECK32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
29788 // CHECK32-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
29789 // CHECK32-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
29790 // CHECK32-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
29791 // CHECK32-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
29792 // CHECK32-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
29793 // CHECK32-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
29794 // CHECK32-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
29795 // CHECK32-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
29796 // CHECK32-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
29797 // CHECK32-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
29798 // CHECK32-NEXT:    ret i32 [[TMP8]]
29799 //
29800 //
29801 // CHECK32-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
29802 // CHECK32-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
29803 // CHECK32-NEXT:  entry:
29804 // CHECK32-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
29805 // CHECK32-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
29806 // CHECK32-NEXT:    [[B:%.*]] = alloca i32, align 4
29807 // CHECK32-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
29808 // CHECK32-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
29809 // CHECK32-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
29810 // CHECK32-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29811 // CHECK32-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29812 // CHECK32-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29813 // CHECK32-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29814 // CHECK32-NEXT:    [[I:%.*]] = alloca i32, align 4
29815 // CHECK32-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
29816 // CHECK32-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
29817 // CHECK32-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
29818 // CHECK32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
29819 // CHECK32-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
29820 // CHECK32-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
29821 // CHECK32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
29822 // CHECK32-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
29823 // CHECK32-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
29824 // CHECK32-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
29825 // CHECK32-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
29826 // CHECK32-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
29827 // CHECK32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
29828 // CHECK32-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60
29829 // CHECK32-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
29830 // CHECK32-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
29831 // CHECK32-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29832 // CHECK32-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
29833 // CHECK32-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29834 // CHECK32-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
29835 // CHECK32-NEXT:    [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
29836 // CHECK32-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
29837 // CHECK32-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
29838 // CHECK32:       omp_if.then:
29839 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29840 // CHECK32:       omp.inner.for.cond:
29841 // CHECK32-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
29842 // CHECK32-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20
29843 // CHECK32-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
29844 // CHECK32-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29845 // CHECK32:       omp.inner.for.body:
29846 // CHECK32-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
29847 // CHECK32-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
29848 // CHECK32-NEXT:    [[ADD3:%.*]] = add nsw i32 0, [[MUL]]
29849 // CHECK32-NEXT:    store i32 [[ADD3]], i32* [[I]], align 4, !llvm.access.group !20
29850 // CHECK32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !20
29851 // CHECK32-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP10]] to double
29852 // CHECK32-NEXT:    [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00
29853 // CHECK32-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
29854 // CHECK32-NEXT:    store double [[ADD4]], double* [[A]], align 4, !llvm.access.group !20
29855 // CHECK32-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
29856 // CHECK32-NEXT:    [[TMP11:%.*]] = load double, double* [[A5]], align 4, !llvm.access.group !20
29857 // CHECK32-NEXT:    [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00
29858 // CHECK32-NEXT:    store double [[INC]], double* [[A5]], align 4, !llvm.access.group !20
29859 // CHECK32-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
29860 // CHECK32-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
29861 // CHECK32-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
29862 // CHECK32-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
29863 // CHECK32-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !20
29864 // CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
29865 // CHECK32:       omp.body.continue:
29866 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
29867 // CHECK32:       omp.inner.for.inc:
29868 // CHECK32-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
29869 // CHECK32-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1
29870 // CHECK32-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
29871 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
29872 // CHECK32:       omp.inner.for.end:
29873 // CHECK32-NEXT:    br label [[OMP_IF_END:%.*]]
29874 // CHECK32:       omp_if.else:
29875 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
29876 // CHECK32:       omp.inner.for.cond9:
29877 // CHECK32-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
29878 // CHECK32-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
29879 // CHECK32-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
29880 // CHECK32-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END25:%.*]]
29881 // CHECK32:       omp.inner.for.body11:
29882 // CHECK32-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
29883 // CHECK32-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP16]], 1
29884 // CHECK32-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
29885 // CHECK32-NEXT:    store i32 [[ADD13]], i32* [[I]], align 4
29886 // CHECK32-NEXT:    [[TMP17:%.*]] = load i32, i32* [[B]], align 4
29887 // CHECK32-NEXT:    [[CONV14:%.*]] = sitofp i32 [[TMP17]] to double
29888 // CHECK32-NEXT:    [[ADD15:%.*]] = fadd double [[CONV14]], 1.500000e+00
29889 // CHECK32-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
29890 // CHECK32-NEXT:    store double [[ADD15]], double* [[A16]], align 4
29891 // CHECK32-NEXT:    [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
29892 // CHECK32-NEXT:    [[TMP18:%.*]] = load double, double* [[A17]], align 4
29893 // CHECK32-NEXT:    [[INC18:%.*]] = fadd double [[TMP18]], 1.000000e+00
29894 // CHECK32-NEXT:    store double [[INC18]], double* [[A17]], align 4
29895 // CHECK32-NEXT:    [[CONV19:%.*]] = fptosi double [[INC18]] to i16
29896 // CHECK32-NEXT:    [[TMP19:%.*]] = mul nsw i32 1, [[TMP1]]
29897 // CHECK32-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP19]]
29898 // CHECK32-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX20]], i32 1
29899 // CHECK32-NEXT:    store i16 [[CONV19]], i16* [[ARRAYIDX21]], align 2
29900 // CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE22:%.*]]
29901 // CHECK32:       omp.body.continue22:
29902 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC23:%.*]]
29903 // CHECK32:       omp.inner.for.inc23:
29904 // CHECK32-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
29905 // CHECK32-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP20]], 1
29906 // CHECK32-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4
29907 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP23:![0-9]+]]
29908 // CHECK32:       omp.inner.for.end25:
29909 // CHECK32-NEXT:    br label [[OMP_IF_END]]
29910 // CHECK32:       omp_if.end:
29911 // CHECK32-NEXT:    store i32 10, i32* [[I]], align 4
29912 // CHECK32-NEXT:    [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]]
29913 // CHECK32-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP21]]
29914 // CHECK32-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i32 1
29915 // CHECK32-NEXT:    [[TMP22:%.*]] = load i16, i16* [[ARRAYIDX27]], align 2
29916 // CHECK32-NEXT:    [[CONV28:%.*]] = sext i16 [[TMP22]] to i32
29917 // CHECK32-NEXT:    [[TMP23:%.*]] = load i32, i32* [[B]], align 4
29918 // CHECK32-NEXT:    [[ADD29:%.*]] = add nsw i32 [[CONV28]], [[TMP23]]
29919 // CHECK32-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
29920 // CHECK32-NEXT:    call void @llvm.stackrestore(i8* [[TMP24]])
29921 // CHECK32-NEXT:    ret i32 [[ADD29]]
29922 //
29923 //
29924 // CHECK32-LABEL: define {{[^@]+}}@_ZL7fstatici
29925 // CHECK32-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
29926 // CHECK32-NEXT:  entry:
29927 // CHECK32-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
29928 // CHECK32-NEXT:    [[A:%.*]] = alloca i32, align 4
29929 // CHECK32-NEXT:    [[AA:%.*]] = alloca i16, align 2
29930 // CHECK32-NEXT:    [[AAA:%.*]] = alloca i8, align 1
29931 // CHECK32-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
29932 // CHECK32-NEXT:    [[TMP:%.*]] = alloca i32, align 4
29933 // CHECK32-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
29934 // CHECK32-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
29935 // CHECK32-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
29936 // CHECK32-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
29937 // CHECK32-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
29938 // CHECK32-NEXT:    [[I:%.*]] = alloca i32, align 4
29939 // CHECK32-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
29940 // CHECK32-NEXT:    [[I5:%.*]] = alloca i32, align 4
29941 // CHECK32-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
29942 // CHECK32-NEXT:    store i32 0, i32* [[A]], align 4
29943 // CHECK32-NEXT:    store i16 0, i16* [[AA]], align 2
29944 // CHECK32-NEXT:    store i8 0, i8* [[AAA]], align 1
29945 // CHECK32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
29946 // CHECK32-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
29947 // CHECK32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
29948 // CHECK32-NEXT:    store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
29949 // CHECK32-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
29950 // CHECK32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29951 // CHECK32-NEXT:    [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
29952 // CHECK32-NEXT:    [[SUB3:%.*]] = sub i32 [[SUB]], 1
29953 // CHECK32-NEXT:    [[ADD:%.*]] = add i32 [[SUB3]], 1
29954 // CHECK32-NEXT:    [[DIV:%.*]] = udiv i32 [[ADD]], 1
29955 // CHECK32-NEXT:    [[SUB4:%.*]] = sub i32 [[DIV]], 1
29956 // CHECK32-NEXT:    store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
29957 // CHECK32-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
29958 // CHECK32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
29959 // CHECK32-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
29960 // CHECK32-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29961 // CHECK32-NEXT:    store i32 [[TMP5]], i32* [[I]], align 4
29962 // CHECK32-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
29963 // CHECK32-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
29964 // CHECK32-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
29965 // CHECK32-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
29966 // CHECK32:       simd.if.then:
29967 // CHECK32-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
29968 // CHECK32-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
29969 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
29970 // CHECK32:       omp.inner.for.cond:
29971 // CHECK32-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
29972 // CHECK32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
29973 // CHECK32-NEXT:    [[ADD6:%.*]] = add i32 [[TMP10]], 1
29974 // CHECK32-NEXT:    [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]]
29975 // CHECK32-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
29976 // CHECK32:       omp.inner.for.body:
29977 // CHECK32-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !25
29978 // CHECK32-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
29979 // CHECK32-NEXT:    [[MUL:%.*]] = mul i32 [[TMP12]], 1
29980 // CHECK32-NEXT:    [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]]
29981 // CHECK32-NEXT:    store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !25
29982 // CHECK32-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25
29983 // CHECK32-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1
29984 // CHECK32-NEXT:    store i32 [[ADD9]], i32* [[A]], align 4, !llvm.access.group !25
29985 // CHECK32-NEXT:    [[TMP14:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25
29986 // CHECK32-NEXT:    [[CONV:%.*]] = sext i16 [[TMP14]] to i32
29987 // CHECK32-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV]], 1
29988 // CHECK32-NEXT:    [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16
29989 // CHECK32-NEXT:    store i16 [[CONV11]], i16* [[AA]], align 2, !llvm.access.group !25
29990 // CHECK32-NEXT:    [[TMP15:%.*]] = load i8, i8* [[AAA]], align 1, !llvm.access.group !25
29991 // CHECK32-NEXT:    [[CONV12:%.*]] = sext i8 [[TMP15]] to i32
29992 // CHECK32-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
29993 // CHECK32-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8
29994 // CHECK32-NEXT:    store i8 [[CONV14]], i8* [[AAA]], align 1, !llvm.access.group !25
29995 // CHECK32-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
29996 // CHECK32-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
29997 // CHECK32-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
29998 // CHECK32-NEXT:    store i32 [[ADD15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
29999 // CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
30000 // CHECK32:       omp.body.continue:
30001 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
30002 // CHECK32:       omp.inner.for.inc:
30003 // CHECK32-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
30004 // CHECK32-NEXT:    [[ADD16:%.*]] = add i32 [[TMP17]], 1
30005 // CHECK32-NEXT:    store i32 [[ADD16]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
30006 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
30007 // CHECK32:       omp.inner.for.end:
30008 // CHECK32-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30009 // CHECK32-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
30010 // CHECK32-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
30011 // CHECK32-NEXT:    [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]]
30012 // CHECK32-NEXT:    [[SUB18:%.*]] = sub i32 [[SUB17]], 1
30013 // CHECK32-NEXT:    [[ADD19:%.*]] = add i32 [[SUB18]], 1
30014 // CHECK32-NEXT:    [[DIV20:%.*]] = udiv i32 [[ADD19]], 1
30015 // CHECK32-NEXT:    [[MUL21:%.*]] = mul i32 [[DIV20]], 1
30016 // CHECK32-NEXT:    [[ADD22:%.*]] = add i32 [[TMP18]], [[MUL21]]
30017 // CHECK32-NEXT:    store i32 [[ADD22]], i32* [[I5]], align 4
30018 // CHECK32-NEXT:    br label [[SIMD_IF_END]]
30019 // CHECK32:       simd.if.end:
30020 // CHECK32-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
30021 // CHECK32-NEXT:    ret i32 [[TMP21]]
30022 //
30023 //
30024 // CHECK32-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
30025 // CHECK32-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
30026 // CHECK32-NEXT:  entry:
30027 // CHECK32-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
30028 // CHECK32-NEXT:    [[A:%.*]] = alloca i32, align 4
30029 // CHECK32-NEXT:    [[AA:%.*]] = alloca i16, align 2
30030 // CHECK32-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
30031 // CHECK32-NEXT:    [[TMP:%.*]] = alloca i32, align 4
30032 // CHECK32-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
30033 // CHECK32-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
30034 // CHECK32-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
30035 // CHECK32-NEXT:    [[I:%.*]] = alloca i32, align 4
30036 // CHECK32-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
30037 // CHECK32-NEXT:    store i32 0, i32* [[A]], align 4
30038 // CHECK32-NEXT:    store i16 0, i16* [[AA]], align 2
30039 // CHECK32-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
30040 // CHECK32-NEXT:    store i32 9, i32* [[DOTOMP_UB]], align 4
30041 // CHECK32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
30042 // CHECK32-NEXT:    store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
30043 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
30044 // CHECK32:       omp.inner.for.cond:
30045 // CHECK32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
30046 // CHECK32-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28
30047 // CHECK32-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
30048 // CHECK32-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
30049 // CHECK32:       omp.inner.for.body:
30050 // CHECK32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
30051 // CHECK32-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
30052 // CHECK32-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
30053 // CHECK32-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !28
30054 // CHECK32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !28
30055 // CHECK32-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
30056 // CHECK32-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !28
30057 // CHECK32-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !28
30058 // CHECK32-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
30059 // CHECK32-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
30060 // CHECK32-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
30061 // CHECK32-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !28
30062 // CHECK32-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
30063 // CHECK32-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
30064 // CHECK32-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
30065 // CHECK32-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !28
30066 // CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
30067 // CHECK32:       omp.body.continue:
30068 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
30069 // CHECK32:       omp.inner.for.inc:
30070 // CHECK32-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
30071 // CHECK32-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1
30072 // CHECK32-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
30073 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
30074 // CHECK32:       omp.inner.for.end:
30075 // CHECK32-NEXT:    store i32 10, i32* [[I]], align 4
30076 // CHECK32-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
30077 // CHECK32-NEXT:    ret i32 [[TMP8]]
30078 //
30079