// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test host codegen.
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK2
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK3
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK4
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK5
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK6
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK7
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK8

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK9
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK10
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK11
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK12
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK13
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK14
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK15
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK16

// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK17
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK18
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK19
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK20
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK21
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK22
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK23
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK24

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK25
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK26
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK27
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK28
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK29
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK30
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK31
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap  %s --check-prefix=CHECK32

// expected-no-diagnostics
#ifndef HEADER
#define HEADER



// We have 8 target regions, but only 7 of them will actually generate offloading
// code; only 6 will have mapped arguments, and only 4 have all-constant map
// sizes (see the schematic note just before the CHECK blocks below).



// Check that target registration is emitted as a global Ctor.


template<typename tx, typename ty>
struct TT{
  tx X;
  ty Y;
};

long long get_val() { return 0; }

int foo(int n) {
  int a = 0;
  short aa = 0;
  float b[10];
  float bn[n];
  double c[5][10];
  double cn[5][n];
  TT<long long, char> d;

  #pragma omp target parallel for simd nowait
  for (int i = 3; i < 32; i += 5) {
  }

  long long k = get_val();
  #pragma omp target parallel for simd if(target: 0) linear(k : 3) schedule(dynamic)
  for (int i = 10; i > 1; i--) {
    a += 1;
  }


  int lin = 12;
  #pragma omp target parallel for simd if(target: 1) linear(lin, a : get_val())
  for (unsigned long long it = 2000; it >= 600; it-=400) {
    aa += 1;
  }




  #pragma omp target parallel for simd if(target: n>10)
  for (short it = 6; it <= 20; it-=-4) {
    a += 1;
    aa += 1;
  }

  // We capture 3 VLA sizes in this target region.




  // The names below are not necessarily consistent with the names used for the
  // addresses above, as some are repeated.










  #pragma omp target parallel for simd if(target: n>20) schedule(static, a)
  for (unsigned char it = 'z'; it >= 'a'; it+=-1) {
    a += 1;
    b[2] += 1.0;
    bn[3] += 1.0;
    c[1][2] += 1.0;
    cn[1][3] += 1.0;
    d.X += 1;
    d.Y += 1;
  }

  return a;
}

// Check that the offloading functions are emitted and that the arguments are
// correct and loaded correctly for the target regions in foo().




// Create stack storage and store the argument there.

// Create stack storage and store the argument there.

// Create stack storage and store the argument there.

// Create local storage for each capture.



// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.

template<typename tx>
tx ftemplate(int n) {
  tx a = 0;
  short aa = 0;
  tx b[10];

  #pragma omp target parallel for simd if(target: n>40)
  for (long long i = -10; i < 10; i += 3) {
    a += 1;
    aa += 1;
    b[2] += 1;
  }

  return a;
}

static
int fstatic(int n) {
  int a = 0;
  short aa = 0;
  char aaa = 0;
  int b[10];

  #pragma omp target parallel for simd if(target: n>50)
  for (unsigned i=100; i<10; i+=10) {
    a += 1;
    aa += 1;
    aaa += 1;
    b[2] += 1;
  }

  return a;
}

struct S1 {
  double a;

  int r1(int n){
    int b = n+1;
    short int c[2][n];

#ifdef OMP5
    #pragma omp target parallel for simd if(n>60) nontemporal(a)
#else
    #pragma omp target parallel for simd if(target: n>60)
#endif // OMP5
    for (unsigned long long it = 2000; it >= 600; it -= 400) {
      this->a = (double)b + 1.5;
      c[1][1] = ++a;
    }

    return c[1][1] + (int)b;
  }
};

int bar(int n){
  int a = 0;

  a += foo(n);

  S1 S;
  a += S.r1(n);

  a += fstatic(n);

  a += ftemplate<int>(n);

  return a;
}



// We capture 2 VLA sizes in this target region.


// The names below are not necessarily consistent with the names used for the
// addresses above, as some are repeated.




















// Check that the offloading functions are emitted and that the arguments are
// correct and loaded correctly for the target regions of the callees of bar().

// Create local storage for each capture.
// Store captures in the context.



// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.


// Create local storage for each capture.
// Store captures in the context.




// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.

// Create local storage for each capture.
// Store captures in the context.



// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.


#endif
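
// NOTE (editorial reading aid, not a FileCheck directive): the host-side
// pattern that the CHECK blocks below verify for each offloaded region is,
// schematically (a simplified sketch with illustrative names, not the exact
// generated IR):
//
//   void *baseptrs[N], *ptrs[N];   // one slot per captured variable
//   int64_t sizes[N];              // byte sizes, constant or VLA-computed
//   /* ...fill the arrays from the captures... */
//   int rc = __tgt_target_teams_mapper(loc, /*device=*/-1, region_id, N,
//                                      baseptrs, ptrs, sizes, maptypes,
//                                      /*names=*/0, /*mappers=*/0,
//                                      /*num_teams=*/1, /*thread_limit=*/0);
//   if (rc != 0)
//     __omp_offloading_..._lNN(...); // host fallback version of the region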
// CHECK1-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    ret i64 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK1-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
// CHECK1-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK1-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK1-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT:    [[K:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[LIN:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[A_CASTED4:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT:    [[A_CASTED6:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[AA_CASTED8:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_PTRS11:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[A_CASTED15:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK1-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK1-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
// CHECK1-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT:    [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK1-NEXT:    [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK1-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
// CHECK1-NEXT:    store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
// CHECK1-NEXT:    [[TMP7:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP8]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP7]])
// CHECK1-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK1-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
// CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT:    store i32 [[TMP11]], i32* [[CONV]], align 4
// CHECK1-NEXT:    [[TMP12:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT:    [[TMP13:%.*]] = load i64, i64* [[K]], align 8
// CHECK1-NEXT:    store i64 [[TMP13]], i64* [[K_CASTED]], align 8
// CHECK1-NEXT:    [[TMP14:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i64 [[TMP12]], i64 [[TMP14]]) #[[ATTR4:[0-9]+]]
// CHECK1-NEXT:    store i32 12, i32* [[LIN]], align 4
// CHECK1-NEXT:    [[TMP15:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT:    store i16 [[TMP15]], i16* [[CONV2]], align 2
// CHECK1-NEXT:    [[TMP16:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK1-NEXT:    store i32 [[TMP17]], i32* [[CONV3]], align 4
// CHECK1-NEXT:    [[TMP18:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED4]] to i32*
// CHECK1-NEXT:    store i32 [[TMP19]], i32* [[CONV5]], align 4
// CHECK1-NEXT:    [[TMP20:%.*]] = load i64, i64* [[A_CASTED4]], align 8
// CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
// CHECK1-NEXT:    store i64 [[TMP16]], i64* [[TMP22]], align 8
// CHECK1-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64*
// CHECK1-NEXT:    store i64 [[TMP16]], i64* [[TMP24]], align 8
// CHECK1-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT:    store i8* null, i8** [[TMP25]], align 8
// CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
// CHECK1-NEXT:    store i64 [[TMP18]], i64* [[TMP27]], align 8
// CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK1-NEXT:    store i64 [[TMP18]], i64* [[TMP29]], align 8
// CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT:    store i8* null, i8** [[TMP30]], align 8
// CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK1-NEXT:    store i64 [[TMP20]], i64* [[TMP32]], align 8
// CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64*
// CHECK1-NEXT:    store i64 [[TMP20]], i64* [[TMP34]], align 8
// CHECK1-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT:    store i8* null, i8** [[TMP35]], align 8
// CHECK1-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP38:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
// CHECK1-NEXT:    br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1:       omp_offload.failed:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]]) #[[ATTR4]]
// CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK1:       omp_offload.cont:
// CHECK1-NEXT:    [[TMP40:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT:    [[CONV7:%.*]] = bitcast i64* [[A_CASTED6]] to i32*
// CHECK1-NEXT:    store i32 [[TMP40]], i32* [[CONV7]], align 4
// CHECK1-NEXT:    [[TMP41:%.*]] = load i64, i64* [[A_CASTED6]], align 8
// CHECK1-NEXT:    [[TMP42:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT:    [[CONV9:%.*]] = bitcast i64* [[AA_CASTED8]] to i16*
// CHECK1-NEXT:    store i16 [[TMP42]], i16* [[CONV9]], align 2
// CHECK1-NEXT:    [[TMP43:%.*]] = load i64, i64* [[AA_CASTED8]], align 8
// CHECK1-NEXT:    [[TMP44:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP44]], 10
// CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1:       omp_if.then:
// CHECK1-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64*
// CHECK1-NEXT:    store i64 [[TMP41]], i64* [[TMP46]], align 8
// CHECK1-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i64*
// CHECK1-NEXT:    store i64 [[TMP41]], i64* [[TMP48]], align 8
// CHECK1-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
// CHECK1-NEXT:    store i8* null, i8** [[TMP49]], align 8
// CHECK1-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 1
// CHECK1-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i64*
// CHECK1-NEXT:    store i64 [[TMP43]], i64* [[TMP51]], align 8
// CHECK1-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 1
// CHECK1-NEXT:    [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64*
// CHECK1-NEXT:    store i64 [[TMP43]], i64* [[TMP53]], align 8
// CHECK1-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 1
// CHECK1-NEXT:    store i8* null, i8** [[TMP54]], align 8
// CHECK1-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT:    [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0
// CHECK1-NEXT:    br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
// CHECK1:       omp_offload.failed13:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]]
// CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT14]]
// CHECK1:       omp_offload.cont14:
// CHECK1-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK1:       omp_if.else:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]]
// CHECK1-NEXT:    br label [[OMP_IF_END]]
// CHECK1:       omp_if.end:
// CHECK1-NEXT:    [[TMP59:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT:    store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT:    [[TMP60:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT:    [[CONV16:%.*]] = bitcast i64* [[A_CASTED15]] to i32*
// CHECK1-NEXT:    store i32 [[TMP60]], i32* [[CONV16]], align 4
// CHECK1-NEXT:    [[TMP61:%.*]] = load i64, i64* [[A_CASTED15]], align 8
// CHECK1-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT:    [[CONV17:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK1-NEXT:    store i32 [[TMP62]], i32* [[CONV17]], align 4
// CHECK1-NEXT:    [[TMP63:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK1-NEXT:    [[TMP64:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT:    [[CMP18:%.*]] = icmp sgt i32 [[TMP64]], 20
// CHECK1-NEXT:    br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE25:%.*]]
// CHECK1:       omp_if.then19:
// CHECK1-NEXT:    [[TMP65:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK1-NEXT:    [[TMP66:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK1-NEXT:    [[TMP67:%.*]] = mul nuw i64 [[TMP66]], 8
// CHECK1-NEXT:    [[TMP68:%.*]] = bitcast [10 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
// CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP68]], i8* align 8 bitcast ([10 x i64]* @.offload_sizes.8 to i8*), i64 80, i1 false)
// CHECK1-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64*
// CHECK1-NEXT:    store i64 [[TMP61]], i64* [[TMP70]], align 8
// CHECK1-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
// CHECK1-NEXT:    store i64 [[TMP61]], i64* [[TMP72]], align 8
// CHECK1-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
// CHECK1-NEXT:    store i8* null, i8** [[TMP73]], align 8
// CHECK1-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
// CHECK1-NEXT:    [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]**
// CHECK1-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 8
// CHECK1-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
// CHECK1-NEXT:    [[TMP77:%.*]] = bitcast i8** [[TMP76]] to [10 x float]**
// CHECK1-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP77]], align 8
// CHECK1-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
// CHECK1-NEXT:    store i8* null, i8** [[TMP78]], align 8
// CHECK1-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
// CHECK1-NEXT:    [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i64*
// CHECK1-NEXT:    store i64 [[TMP2]], i64* [[TMP80]], align 8
// CHECK1-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
// CHECK1-NEXT:    [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i64*
// CHECK1-NEXT:    store i64 [[TMP2]], i64* [[TMP82]], align 8
// CHECK1-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
// CHECK1-NEXT:    store i8* null, i8** [[TMP83]], align 8
// CHECK1-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
// CHECK1-NEXT:    [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float**
// CHECK1-NEXT:    store float* [[VLA]], float** [[TMP85]], align 8
// CHECK1-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
// CHECK1-NEXT:    [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float**
// CHECK1-NEXT:    store float* [[VLA]], float** [[TMP87]], align 8
// CHECK1-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK1-NEXT:    store i64 [[TMP65]], i64* [[TMP88]], align 8
// CHECK1-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
// CHECK1-NEXT:    store i8* null, i8** [[TMP89]], align 8
// CHECK1-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
// CHECK1-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]**
// CHECK1-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 8
// CHECK1-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
// CHECK1-NEXT:    [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]**
// CHECK1-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 8
// CHECK1-NEXT:    [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
// CHECK1-NEXT:    store i8* null, i8** [[TMP94]], align 8
// CHECK1-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5
// CHECK1-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i64*
// CHECK1-NEXT:    store i64 5, i64* [[TMP96]], align 8
// CHECK1-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5
// CHECK1-NEXT:    [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64*
// CHECK1-NEXT:    store i64 5, i64* [[TMP98]], align 8
// CHECK1-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5
// CHECK1-NEXT:    store i8* null, i8** [[TMP99]], align 8
// CHECK1-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6
// CHECK1-NEXT:    [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64*
// CHECK1-NEXT:    store i64 [[TMP5]], i64* [[TMP101]], align 8
// CHECK1-NEXT:    [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6
// CHECK1-NEXT:    [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i64*
// CHECK1-NEXT:    store i64 [[TMP5]], i64* [[TMP103]], align 8
// CHECK1-NEXT:    [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6
// CHECK1-NEXT:    store i8* null, i8** [[TMP104]], align 8
// CHECK1-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7
// CHECK1-NEXT:    [[TMP106:%.*]] = bitcast i8** [[TMP105]] to double**
// CHECK1-NEXT:    store double* [[VLA1]], double** [[TMP106]], align 8
// CHECK1-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7
// CHECK1-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to double**
// CHECK1-NEXT:    store double* [[VLA1]], double** [[TMP108]], align 8
// CHECK1-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK1-NEXT:    store i64 [[TMP67]], i64* [[TMP109]], align 8
// CHECK1-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7
// CHECK1-NEXT:    store i8* null, i8** [[TMP110]], align 8
// CHECK1-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8
// CHECK1-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to %struct.TT**
// CHECK1-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP112]], align 8
// CHECK1-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8
// CHECK1-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to %struct.TT**
// CHECK1-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP114]], align 8
// CHECK1-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8
// CHECK1-NEXT:    store i8* null, i8** [[TMP115]], align 8
// CHECK1-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9
// CHECK1-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i64*
// CHECK1-NEXT:    store i64 [[TMP63]], i64* [[TMP117]], align 8
// CHECK1-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9
// CHECK1-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i64*
// CHECK1-NEXT:    store i64 [[TMP63]], i64* [[TMP119]], align 8
// CHECK1-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9
// CHECK1-NEXT:    store i8* null, i8** [[TMP120]], align 8
// CHECK1-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP124:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP121]], i8** [[TMP122]], i64* [[TMP123]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT:    [[TMP125:%.*]] = icmp ne i32 [[TMP124]], 0
// CHECK1-NEXT:    br i1 [[TMP125]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]]
// CHECK1:       omp_offload.failed23:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]]
// CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT24]]
// CHECK1:       omp_offload.cont24:
// CHECK1-NEXT:    br label [[OMP_IF_END26:%.*]]
// CHECK1:       omp_if.else25:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]]
// CHECK1-NEXT:    br label [[OMP_IF_END26]]
// CHECK1:       omp_if.end26:
// CHECK1-NEXT:    [[TMP126:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT:    [[TMP127:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT:    call void @llvm.stackrestore(i8* [[TMP127]])
// CHECK1-NEXT:    ret i32 [[TMP126]]
//
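// NOTE (editorial reading aid): for the regions guarded by `if(target: n>10)`
// and `if(target: n>20)`, the block above first evaluates the clause and only
// attempts the offload on the true branch; roughly (an illustrative sketch,
// not the exact IR):
//
//   if (n > 10) {                              // omp_if.then
//     if (__tgt_target_teams_mapper(...) != 0)
//       __omp_offloading_..._l116(a, aa);      // fallback if offload fails
//   } else {                                   // omp_if.else
//     __omp_offloading_..._l116(a, aa);        // no offload attempted at all
//   }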
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK1-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1:       cond.true:
// CHECK1-NEXT:    br label [[COND_END:%.*]]
// CHECK1:       cond.false:
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    br label [[COND_END]]
// CHECK1:       cond.end:
// CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1:       omp.inner.for.cond:
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
// CHECK1-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1:       omp.inner.for.body:
// CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10
// CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1:       omp.body.continue:
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1:       omp.inner.for.inc:
// CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK1-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK1:       omp.inner.for.end:
// CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1:       omp.loop.exit:
// CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK1-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1:       .omp.final.then:
// CHECK1-NEXT:    store i32 33, i32* [[I]], align 4
// CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK1:       .omp.final.done:
// CHECK1-NEXT:    ret void
//
//
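// NOTE (editorial reading aid): in .omp_outlined. above, the bounds follow
// from the source loop `for (int i = 3; i < 32; i += 5)`: it has 6 iterations
// (i = 3, 8, ..., 28), so the normalized IV spans 0..5 (hence `store i32 5`
// into .omp.ub) and the value of i after the loop is 3 + 6*5 = 33 (hence
// `store i32 33` in .omp.final.then). The schedule kind 34 passed to
// __kmpc_for_static_init_4 corresponds to kmp_sch_static in the runtime.
//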
// CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK1-SAME: (i32 noundef signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK1-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
// CHECK1-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK1-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK1-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT:    [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK1-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
// CHECK1-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
// CHECK1-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
// CHECK1-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
// CHECK1-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25
// CHECK1-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT:    store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT:    store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT:    store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT:    [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT:    [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
// CHECK1-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK1-NEXT:    br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK1:       omp_offload.failed.i:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
// CHECK1-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK1:       .omp_outlined..1.exit:
// CHECK1-NEXT:    ret i32 0
//
//
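// NOTE (editorial reading aid): because the l96 region is `nowait`, _Z3fooi
// does not invoke the offload entry inline; it allocates a task with
// __kmpc_omp_target_task_alloc and enqueues it with __kmpc_omp_task (see the
// _Z3fooi body above). The task entry above then issues the asynchronous
// __tgt_target_teams_nowait_mapper call and falls back to the host wrapper if
// it returns nonzero.
//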
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
// CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[K:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT:    store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT:    store i32 [[TMP0]], i32* [[CONV1]], align 4
// CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[K_ADDR]], align 8
// CHECK1-NEXT:    store i64 [[TMP2]], i64* [[K_CASTED]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK1-NEXT:    ret void
//
//
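// NOTE (editorial reading aid): the l101 region has `if(target: 0)`, so no
// offload is ever attempted: _Z3fooi calls the wrapper above directly, and the
// wrapper just re-packs the scalar captures and runs the region on the host
// via __kmpc_fork_call.
//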
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[K:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[K1:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT:    store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT:    [[TMP0:%.*]] = load i64, i64* [[K_ADDR]], align 8
// CHECK1-NEXT:    store i64 [[TMP0]], i64* [[DOTLINEAR_START]], align 8
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 8, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]])
// CHECK1-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 35, i32 0, i32 8, i32 1, i32 1)
// CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1:       omp.dispatch.cond:
// CHECK1-NEXT:    [[TMP3:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK1-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1:       omp.dispatch.body:
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1:       omp.inner.for.cond:
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1:       omp.inner.for.body:
// CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
// CHECK1-NEXT:    store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !26
// CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !26
// CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK1-NEXT:    [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3
// CHECK1-NEXT:    [[CONV3:%.*]] = sext i32 [[MUL2]] to i64
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]]
// CHECK1-NEXT:    store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !26
// CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !26
// CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT:    store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group !26
// CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1:       omp.body.continue:
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1:       omp.inner.for.inc:
// CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK1-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK1-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
// CHECK1:       omp.inner.for.end:
// CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1:       omp.dispatch.inc:
// CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
// CHECK1:       omp.dispatch.end:
// CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK1-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1:       .omp.final.then:
// CHECK1-NEXT:    store i32 1, i32* [[I]], align 4
// CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK1:       .omp.final.done:
// CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK1-NEXT:    br i1 [[TMP15]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK1:       .omp.linear.pu:
// CHECK1-NEXT:    [[TMP16:%.*]] = load i64, i64* [[K1]], align 8
// CHECK1-NEXT:    store i64 [[TMP16]], i64* [[K_ADDR]], align 8
// CHECK1-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK1:       .omp.linear.pu.done:
// CHECK1-NEXT:    ret void
//
//
805 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
806 // CHECK1-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR2]] {
807 // CHECK1-NEXT:  entry:
808 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
809 // CHECK1-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
810 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
811 // CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
812 // CHECK1-NEXT:    [[LIN_CASTED:%.*]] = alloca i64, align 8
813 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
814 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
815 // CHECK1-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
816 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
817 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
818 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
819 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
820 // CHECK1-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
821 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
822 // CHECK1-NEXT:    store i16 [[TMP0]], i16* [[CONV3]], align 2
823 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
824 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
825 // CHECK1-NEXT:    [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
826 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[CONV4]], align 4
827 // CHECK1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
828 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 4
829 // CHECK1-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
830 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[CONV5]], align 4
831 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
832 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
833 // CHECK1-NEXT:    ret void
834 //
835 //
836 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
837 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
838 // CHECK1-NEXT:  entry:
839 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
840 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
841 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
842 // CHECK1-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
843 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
844 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
845 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i64, align 8
846 // CHECK1-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
847 // CHECK1-NEXT:    [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
848 // CHECK1-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
849 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
850 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
851 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
852 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
853 // CHECK1-NEXT:    [[IT:%.*]] = alloca i64, align 8
854 // CHECK1-NEXT:    [[LIN4:%.*]] = alloca i32, align 4
855 // CHECK1-NEXT:    [[A5:%.*]] = alloca i32, align 4
856 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
857 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
858 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
859 // CHECK1-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
860 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
861 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
862 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
863 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
864 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 4
865 // CHECK1-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
866 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 4
867 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
868 // CHECK1-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
869 // CHECK1-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
870 // CHECK1-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
871 // CHECK1-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
872 // CHECK1-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
873 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
874 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
875 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
876 // CHECK1-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
877 // CHECK1-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
878 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
879 // CHECK1-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
880 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
881 // CHECK1:       cond.true:
882 // CHECK1-NEXT:    br label [[COND_END:%.*]]
883 // CHECK1:       cond.false:
884 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
885 // CHECK1-NEXT:    br label [[COND_END]]
886 // CHECK1:       cond.end:
887 // CHECK1-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
888 // CHECK1-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
889 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
890 // CHECK1-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
891 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
892 // CHECK1:       omp.inner.for.cond:
893 // CHECK1-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
894 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !29
895 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
896 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
897 // CHECK1:       omp.inner.for.body:
898 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
899 // CHECK1-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
900 // CHECK1-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
901 // CHECK1-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !29
902 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !29
903 // CHECK1-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
904 // CHECK1-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
905 // CHECK1-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29
906 // CHECK1-NEXT:    [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
907 // CHECK1-NEXT:    [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
908 // CHECK1-NEXT:    [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
909 // CHECK1-NEXT:    store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !29
910 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !29
911 // CHECK1-NEXT:    [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
912 // CHECK1-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
913 // CHECK1-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29
914 // CHECK1-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
915 // CHECK1-NEXT:    [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
916 // CHECK1-NEXT:    [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
917 // CHECK1-NEXT:    store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !29
918 // CHECK1-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !29
919 // CHECK1-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
920 // CHECK1-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
921 // CHECK1-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
922 // CHECK1-NEXT:    store i16 [[CONV16]], i16* [[CONV]], align 2, !llvm.access.group !29
923 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
924 // CHECK1:       omp.body.continue:
925 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
926 // CHECK1:       omp.inner.for.inc:
927 // CHECK1-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
928 // CHECK1-NEXT:    [[ADD17:%.*]] = add i64 [[TMP17]], 1
929 // CHECK1-NEXT:    store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
930 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
931 // CHECK1:       omp.inner.for.end:
932 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
933 // CHECK1:       omp.loop.exit:
934 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
935 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
936 // CHECK1-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
937 // CHECK1-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
938 // CHECK1:       .omp.final.then:
939 // CHECK1-NEXT:    store i64 400, i64* [[IT]], align 8
940 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
941 // CHECK1:       .omp.final.done:
942 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
943 // CHECK1-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
944 // CHECK1-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
945 // CHECK1:       .omp.linear.pu:
946 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4
947 // CHECK1-NEXT:    store i32 [[TMP22]], i32* [[CONV1]], align 4
948 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A5]], align 4
949 // CHECK1-NEXT:    store i32 [[TMP23]], i32* [[CONV2]], align 4
950 // CHECK1-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
951 // CHECK1:       .omp.linear.pu.done:
952 // CHECK1-NEXT:    ret void
953 //
954 //
955 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
956 // CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR2]] {
957 // CHECK1-NEXT:  entry:
958 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
959 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
960 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
961 // CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
962 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
963 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
964 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
965 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
966 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
967 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
968 // CHECK1-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
969 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
970 // CHECK1-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
971 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
972 // CHECK1-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
973 // CHECK1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
974 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
975 // CHECK1-NEXT:    ret void
976 //
977 //
978 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
979 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3]] {
980 // CHECK1-NEXT:  entry:
981 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
982 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
983 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
984 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
985 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
986 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i16, align 2
987 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
988 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
989 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
990 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
991 // CHECK1-NEXT:    [[IT:%.*]] = alloca i16, align 2
992 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
993 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
994 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
995 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
996 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
997 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
998 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
999 // CHECK1-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
1000 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1001 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1002 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1003 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1004 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1005 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1006 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
1007 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1008 // CHECK1:       cond.true:
1009 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1010 // CHECK1:       cond.false:
1011 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1012 // CHECK1-NEXT:    br label [[COND_END]]
1013 // CHECK1:       cond.end:
1014 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
1015 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1016 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1017 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
1018 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1019 // CHECK1:       omp.inner.for.cond:
1020 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
1021 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
1022 // CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
1023 // CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1024 // CHECK1:       omp.inner.for.body:
1025 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
1026 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
1027 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
1028 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
1029 // CHECK1-NEXT:    store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !32
1030 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !32
1031 // CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
1032 // CHECK1-NEXT:    store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group !32
1033 // CHECK1-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !32
1034 // CHECK1-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
1035 // CHECK1-NEXT:    [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
1036 // CHECK1-NEXT:    [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
1037 // CHECK1-NEXT:    store i16 [[CONV7]], i16* [[CONV1]], align 2, !llvm.access.group !32
1038 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1039 // CHECK1:       omp.body.continue:
1040 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1041 // CHECK1:       omp.inner.for.inc:
1042 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
1043 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
1044 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
1045 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
1046 // CHECK1:       omp.inner.for.end:
1047 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1048 // CHECK1:       omp.loop.exit:
1049 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1050 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1051 // CHECK1-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
1052 // CHECK1-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1053 // CHECK1:       .omp.final.then:
1054 // CHECK1-NEXT:    store i16 22, i16* [[IT]], align 2
1055 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1056 // CHECK1:       .omp.final.done:
1057 // CHECK1-NEXT:    ret void
1058 //
1059 //
1060 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
1061 // CHECK1-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
1062 // CHECK1-NEXT:  entry:
1063 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1064 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
1065 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
1066 // CHECK1-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
1067 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
1068 // CHECK1-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
1069 // CHECK1-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
1070 // CHECK1-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
1071 // CHECK1-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
1072 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
1073 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1074 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
1075 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1076 // CHECK1-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
1077 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
1078 // CHECK1-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
1079 // CHECK1-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
1080 // CHECK1-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
1081 // CHECK1-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
1082 // CHECK1-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
1083 // CHECK1-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
1084 // CHECK1-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
1085 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1086 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
1087 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
1088 // CHECK1-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
1089 // CHECK1-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
1090 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
1091 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
1092 // CHECK1-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
1093 // CHECK1-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
1094 // CHECK1-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
1095 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
1096 // CHECK1-NEXT:    [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1097 // CHECK1-NEXT:    store i32 [[TMP8]], i32* [[CONV6]], align 4
1098 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
1099 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 4
1100 // CHECK1-NEXT:    [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
1101 // CHECK1-NEXT:    store i32 [[TMP10]], i32* [[CONV7]], align 4
1102 // CHECK1-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
1103 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
1104 // CHECK1-NEXT:    ret void
1105 //
1106 //
1107 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
1108 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
1109 // CHECK1-NEXT:  entry:
1110 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1111 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1112 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1113 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
1114 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
1115 // CHECK1-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
1116 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
1117 // CHECK1-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
1118 // CHECK1-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
1119 // CHECK1-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
1120 // CHECK1-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
1121 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
1122 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1123 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i8, align 1
1124 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1125 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1126 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1127 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1128 // CHECK1-NEXT:    [[IT:%.*]] = alloca i8, align 1
1129 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1130 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1131 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1132 // CHECK1-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
1133 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
1134 // CHECK1-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
1135 // CHECK1-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
1136 // CHECK1-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
1137 // CHECK1-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
1138 // CHECK1-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
1139 // CHECK1-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
1140 // CHECK1-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
1141 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1142 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
1143 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
1144 // CHECK1-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
1145 // CHECK1-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
1146 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
1147 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
1148 // CHECK1-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
1149 // CHECK1-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
1150 // CHECK1-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
1151 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1152 // CHECK1-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
1153 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1154 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1155 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 4
1156 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1157 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
1158 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
1159 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
1160 // CHECK1:       omp.dispatch.cond:
1161 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1162 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
1163 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1164 // CHECK1:       cond.true:
1165 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1166 // CHECK1:       cond.false:
1167 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1168 // CHECK1-NEXT:    br label [[COND_END]]
1169 // CHECK1:       cond.end:
1170 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
1171 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1172 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1173 // CHECK1-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
1174 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1175 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1176 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
1177 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
1178 // CHECK1:       omp.dispatch.body:
1179 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1180 // CHECK1:       omp.inner.for.cond:
1181 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
1182 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
1183 // CHECK1-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
1184 // CHECK1-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1185 // CHECK1:       omp.inner.for.body:
1186 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
1187 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
1188 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
1189 // CHECK1-NEXT:    [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
1190 // CHECK1-NEXT:    store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !35
1191 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !35
1192 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
1193 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[CONV]], align 4, !llvm.access.group !35
1194 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
1195 // CHECK1-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !35
1196 // CHECK1-NEXT:    [[CONV9:%.*]] = fpext float [[TMP20]] to double
1197 // CHECK1-NEXT:    [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
1198 // CHECK1-NEXT:    [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
1199 // CHECK1-NEXT:    store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !35
1200 // CHECK1-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
1201 // CHECK1-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !35
1202 // CHECK1-NEXT:    [[CONV13:%.*]] = fpext float [[TMP21]] to double
1203 // CHECK1-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
1204 // CHECK1-NEXT:    [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
1205 // CHECK1-NEXT:    store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !35
1206 // CHECK1-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
1207 // CHECK1-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
1208 // CHECK1-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !35
1209 // CHECK1-NEXT:    [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
1210 // CHECK1-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !35
1211 // CHECK1-NEXT:    [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
1212 // CHECK1-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
1213 // CHECK1-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
1214 // CHECK1-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !35
1215 // CHECK1-NEXT:    [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
1216 // CHECK1-NEXT:    store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !35
1217 // CHECK1-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
1218 // CHECK1-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !35
1219 // CHECK1-NEXT:    [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
1220 // CHECK1-NEXT:    store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !35
1221 // CHECK1-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
1222 // CHECK1-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !35
1223 // CHECK1-NEXT:    [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
1224 // CHECK1-NEXT:    [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
1225 // CHECK1-NEXT:    [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
1226 // CHECK1-NEXT:    store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !35
1227 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1228 // CHECK1:       omp.body.continue:
1229 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1230 // CHECK1:       omp.inner.for.inc:
1231 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
1232 // CHECK1-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
1233 // CHECK1-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
1234 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
1235 // CHECK1:       omp.inner.for.end:
1236 // CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
1237 // CHECK1:       omp.dispatch.inc:
1238 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1239 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1240 // CHECK1-NEXT:    [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
1241 // CHECK1-NEXT:    store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
1242 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1243 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1244 // CHECK1-NEXT:    [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
1245 // CHECK1-NEXT:    store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
1246 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
1247 // CHECK1:       omp.dispatch.end:
1248 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
1249 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1250 // CHECK1-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
1251 // CHECK1-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1252 // CHECK1:       .omp.final.then:
1253 // CHECK1-NEXT:    store i8 96, i8* [[IT]], align 1
1254 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1255 // CHECK1:       .omp.final.done:
1256 // CHECK1-NEXT:    ret void
1257 //
1258 //
1259 // CHECK1-LABEL: define {{[^@]+}}@_Z3bari
1260 // CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
1261 // CHECK1-NEXT:  entry:
1262 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1263 // CHECK1-NEXT:    [[A:%.*]] = alloca i32, align 4
1264 // CHECK1-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
1265 // CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1266 // CHECK1-NEXT:    store i32 0, i32* [[A]], align 4
1267 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
1268 // CHECK1-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
1269 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
1270 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
1271 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
1272 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
1273 // CHECK1-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]])
1274 // CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
1275 // CHECK1-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
1276 // CHECK1-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
1277 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
1278 // CHECK1-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
1279 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
1280 // CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
1281 // CHECK1-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
1282 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
1283 // CHECK1-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
1284 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
1285 // CHECK1-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
1286 // CHECK1-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
1287 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
1288 // CHECK1-NEXT:    ret i32 [[TMP8]]
1289 //
1290 //
1291 // CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
1292 // CHECK1-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
1293 // CHECK1-NEXT:  entry:
1294 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
1295 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1296 // CHECK1-NEXT:    [[B:%.*]] = alloca i32, align 4
1297 // CHECK1-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
1298 // CHECK1-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
1299 // CHECK1-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
1300 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
1301 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
1302 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
1303 // CHECK1-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
1304 // CHECK1-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
1305 // CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1306 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
1307 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
1308 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
1309 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
1310 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
1311 // CHECK1-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
1312 // CHECK1-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
1313 // CHECK1-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
1314 // CHECK1-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
1315 // CHECK1-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
1316 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
1317 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B]], align 4
1318 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
1319 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[CONV]], align 4
1320 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
1321 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4
1322 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60
1323 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
1324 // CHECK1:       omp_if.then:
1325 // CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
1326 // CHECK1-NEXT:    [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]]
1327 // CHECK1-NEXT:    [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
1328 // CHECK1-NEXT:    [[TMP10:%.*]] = bitcast [5 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
1329 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP10]], i8* align 8 bitcast ([5 x i64]* @.offload_sizes.11 to i8*), i64 40, i1 false)
1330 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1331 // CHECK1-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to %struct.S1**
1332 // CHECK1-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP12]], align 8
1333 // CHECK1-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1334 // CHECK1-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to double**
1335 // CHECK1-NEXT:    store double* [[A]], double** [[TMP14]], align 8
1336 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1337 // CHECK1-NEXT:    store i8* null, i8** [[TMP15]], align 8
1338 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1339 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
1340 // CHECK1-NEXT:    store i64 [[TMP6]], i64* [[TMP17]], align 8
1341 // CHECK1-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1342 // CHECK1-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64*
1343 // CHECK1-NEXT:    store i64 [[TMP6]], i64* [[TMP19]], align 8
1344 // CHECK1-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1345 // CHECK1-NEXT:    store i8* null, i8** [[TMP20]], align 8
1346 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
1347 // CHECK1-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
1348 // CHECK1-NEXT:    store i64 2, i64* [[TMP22]], align 8
1349 // CHECK1-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
1350 // CHECK1-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64*
1351 // CHECK1-NEXT:    store i64 2, i64* [[TMP24]], align 8
1352 // CHECK1-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
1353 // CHECK1-NEXT:    store i8* null, i8** [[TMP25]], align 8
1354 // CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
1355 // CHECK1-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
1356 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[TMP27]], align 8
1357 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
1358 // CHECK1-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
1359 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[TMP29]], align 8
1360 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
1361 // CHECK1-NEXT:    store i8* null, i8** [[TMP30]], align 8
1362 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
1363 // CHECK1-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i16**
1364 // CHECK1-NEXT:    store i16* [[VLA]], i16** [[TMP32]], align 8
1365 // CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
1366 // CHECK1-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i16**
1367 // CHECK1-NEXT:    store i16* [[VLA]], i16** [[TMP34]], align 8
1368 // CHECK1-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
1369 // CHECK1-NEXT:    store i64 [[TMP9]], i64* [[TMP35]], align 8
1370 // CHECK1-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
1371 // CHECK1-NEXT:    store i8* null, i8** [[TMP36]], align 8
1372 // CHECK1-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1373 // CHECK1-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1374 // CHECK1-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
1375 // CHECK1-NEXT:    [[TMP40:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* [[TMP39]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
1376 // CHECK1-NEXT:    [[TMP41:%.*]] = icmp ne i32 [[TMP40]], 0
1377 // CHECK1-NEXT:    br i1 [[TMP41]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1378 // CHECK1:       omp_offload.failed:
1379 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]]
1380 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1381 // CHECK1:       omp_offload.cont:
1382 // CHECK1-NEXT:    br label [[OMP_IF_END:%.*]]
1383 // CHECK1:       omp_if.else:
1384 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]]
1385 // CHECK1-NEXT:    br label [[OMP_IF_END]]
1386 // CHECK1:       omp_if.end:
1387 // CHECK1-NEXT:    [[TMP42:%.*]] = mul nsw i64 1, [[TMP2]]
1388 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP42]]
1389 // CHECK1-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
1390 // CHECK1-NEXT:    [[TMP43:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
1391 // CHECK1-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP43]] to i32
1392 // CHECK1-NEXT:    [[TMP44:%.*]] = load i32, i32* [[B]], align 4
1393 // CHECK1-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP44]]
1394 // CHECK1-NEXT:    [[TMP45:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
1395 // CHECK1-NEXT:    call void @llvm.stackrestore(i8* [[TMP45]])
1396 // CHECK1-NEXT:    ret i32 [[ADD4]]
1397 //
1398 //
1399 // CHECK1-LABEL: define {{[^@]+}}@_ZL7fstatici
1400 // CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
1401 // CHECK1-NEXT:  entry:
1402 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1403 // CHECK1-NEXT:    [[A:%.*]] = alloca i32, align 4
1404 // CHECK1-NEXT:    [[AA:%.*]] = alloca i16, align 2
1405 // CHECK1-NEXT:    [[AAA:%.*]] = alloca i8, align 1
1406 // CHECK1-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
1407 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1408 // CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
1409 // CHECK1-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
1410 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
1411 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
1412 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
1413 // CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1414 // CHECK1-NEXT:    store i32 0, i32* [[A]], align 4
1415 // CHECK1-NEXT:    store i16 0, i16* [[AA]], align 2
1416 // CHECK1-NEXT:    store i8 0, i8* [[AAA]], align 1
1417 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
1418 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1419 // CHECK1-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
1420 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
1421 // CHECK1-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
1422 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
1423 // CHECK1-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
1424 // CHECK1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
1425 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
1426 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
1427 // CHECK1-NEXT:    store i8 [[TMP4]], i8* [[CONV2]], align 1
1428 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
1429 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
1430 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
1431 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
1432 // CHECK1:       omp_if.then:
1433 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1434 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
1435 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
1436 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1437 // CHECK1-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
1438 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[TMP10]], align 8
1439 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1440 // CHECK1-NEXT:    store i8* null, i8** [[TMP11]], align 8
1441 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1442 // CHECK1-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
1443 // CHECK1-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
1444 // CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1445 // CHECK1-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
1446 // CHECK1-NEXT:    store i64 [[TMP3]], i64* [[TMP15]], align 8
1447 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1448 // CHECK1-NEXT:    store i8* null, i8** [[TMP16]], align 8
1449 // CHECK1-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
1450 // CHECK1-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
1451 // CHECK1-NEXT:    store i64 [[TMP5]], i64* [[TMP18]], align 8
1452 // CHECK1-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
1453 // CHECK1-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
1454 // CHECK1-NEXT:    store i64 [[TMP5]], i64* [[TMP20]], align 8
1455 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
1456 // CHECK1-NEXT:    store i8* null, i8** [[TMP21]], align 8
1457 // CHECK1-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
1458 // CHECK1-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
1459 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
1460 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
1461 // CHECK1-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
1462 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
1463 // CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
1464 // CHECK1-NEXT:    store i8* null, i8** [[TMP26]], align 8
1465 // CHECK1-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1466 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1467 // CHECK1-NEXT:    [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
1468 // CHECK1-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
1469 // CHECK1-NEXT:    br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1470 // CHECK1:       omp_offload.failed:
1471 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
1472 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1473 // CHECK1:       omp_offload.cont:
1474 // CHECK1-NEXT:    br label [[OMP_IF_END:%.*]]
1475 // CHECK1:       omp_if.else:
1476 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
1477 // CHECK1-NEXT:    br label [[OMP_IF_END]]
1478 // CHECK1:       omp_if.end:
1479 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[A]], align 4
1480 // CHECK1-NEXT:    ret i32 [[TMP31]]
1481 //
1482 //
1483 // CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
1484 // CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
1485 // CHECK1-NEXT:  entry:
1486 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1487 // CHECK1-NEXT:    [[A:%.*]] = alloca i32, align 4
1488 // CHECK1-NEXT:    [[AA:%.*]] = alloca i16, align 2
1489 // CHECK1-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
1490 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1491 // CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
1492 // CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
1493 // CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
1494 // CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
1495 // CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1496 // CHECK1-NEXT:    store i32 0, i32* [[A]], align 4
1497 // CHECK1-NEXT:    store i16 0, i16* [[AA]], align 2
1498 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
1499 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1500 // CHECK1-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
1501 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
1502 // CHECK1-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
1503 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
1504 // CHECK1-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
1505 // CHECK1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
1506 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
1507 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
1508 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
1509 // CHECK1:       omp_if.then:
1510 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1511 // CHECK1-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
1512 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
1513 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1514 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
1515 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
1516 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1517 // CHECK1-NEXT:    store i8* null, i8** [[TMP9]], align 8
1518 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1519 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
1520 // CHECK1-NEXT:    store i64 [[TMP3]], i64* [[TMP11]], align 8
1521 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1522 // CHECK1-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
1523 // CHECK1-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
1524 // CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1525 // CHECK1-NEXT:    store i8* null, i8** [[TMP14]], align 8
1526 // CHECK1-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
1527 // CHECK1-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
1528 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
1529 // CHECK1-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
1530 // CHECK1-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
1531 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
1532 // CHECK1-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
1533 // CHECK1-NEXT:    store i8* null, i8** [[TMP19]], align 8
1534 // CHECK1-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1535 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1536 // CHECK1-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
1537 // CHECK1-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
1538 // CHECK1-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1539 // CHECK1:       omp_offload.failed:
1540 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
1541 // CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1542 // CHECK1:       omp_offload.cont:
1543 // CHECK1-NEXT:    br label [[OMP_IF_END:%.*]]
1544 // CHECK1:       omp_if.else:
1545 // CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
1546 // CHECK1-NEXT:    br label [[OMP_IF_END]]
1547 // CHECK1:       omp_if.end:
1548 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
1549 // CHECK1-NEXT:    ret i32 [[TMP24]]
1550 //
1551 //
1552 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
1553 // CHECK1-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
1554 // CHECK1-NEXT:  entry:
1555 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
1556 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
1557 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
1558 // CHECK1-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
1559 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
1560 // CHECK1-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
1561 // CHECK1-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
1562 // CHECK1-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
1563 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
1564 // CHECK1-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
1565 // CHECK1-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
1566 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
1567 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
1568 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
1569 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
1570 // CHECK1-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
1571 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
1572 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
1573 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
1574 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
1575 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
1576 // CHECK1-NEXT:    ret void
1577 //
1578 //
1579 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10
1580 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
1581 // CHECK1-NEXT:  entry:
1582 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1583 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1584 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
1585 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
1586 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
1587 // CHECK1-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
1588 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
1589 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
1590 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i64, align 8
1591 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
1592 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
1593 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
1594 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1595 // CHECK1-NEXT:    [[IT:%.*]] = alloca i64, align 8
1596 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1597 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1598 // CHECK1-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
1599 // CHECK1-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
1600 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
1601 // CHECK1-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
1602 // CHECK1-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
1603 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
1604 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
1605 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
1606 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
1607 // CHECK1-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
1608 // CHECK1-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
1609 // CHECK1-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
1610 // CHECK1-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
1611 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1612 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1613 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
1614 // CHECK1-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
1615 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
1616 // CHECK1-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
1617 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1618 // CHECK1:       cond.true:
1619 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1620 // CHECK1:       cond.false:
1621 // CHECK1-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
1622 // CHECK1-NEXT:    br label [[COND_END]]
1623 // CHECK1:       cond.end:
1624 // CHECK1-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
1625 // CHECK1-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
1626 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
1627 // CHECK1-NEXT:    store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
1628 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1629 // CHECK1:       omp.inner.for.cond:
1630 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
1631 // CHECK1-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !38
1632 // CHECK1-NEXT:    [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
1633 // CHECK1-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1634 // CHECK1:       omp.inner.for.body:
1635 // CHECK1-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
1636 // CHECK1-NEXT:    [[MUL:%.*]] = mul i64 [[TMP11]], 400
1637 // CHECK1-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
1638 // CHECK1-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !38
1639 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !38
1640 // CHECK1-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double
1641 // CHECK1-NEXT:    [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
1642 // CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
1643 // CHECK1-NEXT:    store double [[ADD]], double* [[A]], align 8, !llvm.access.group !38
1644 // CHECK1-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
1645 // CHECK1-NEXT:    [[TMP13:%.*]] = load double, double* [[A5]], align 8, !llvm.access.group !38
1646 // CHECK1-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
1647 // CHECK1-NEXT:    store double [[INC]], double* [[A5]], align 8, !llvm.access.group !38
1648 // CHECK1-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
1649 // CHECK1-NEXT:    [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]]
1650 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]]
1651 // CHECK1-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
1652 // CHECK1-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !38
1653 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1654 // CHECK1:       omp.body.continue:
1655 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1656 // CHECK1:       omp.inner.for.inc:
1657 // CHECK1-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
1658 // CHECK1-NEXT:    [[ADD8:%.*]] = add i64 [[TMP15]], 1
1659 // CHECK1-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
1660 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
1661 // CHECK1:       omp.inner.for.end:
1662 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1663 // CHECK1:       omp.loop.exit:
1664 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
1665 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1666 // CHECK1-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
1667 // CHECK1-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1668 // CHECK1:       .omp.final.then:
1669 // CHECK1-NEXT:    store i64 400, i64* [[IT]], align 8
1670 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1671 // CHECK1:       .omp.final.done:
1672 // CHECK1-NEXT:    ret void
1673 //
1674 //
1675 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
1676 // CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
1677 // CHECK1-NEXT:  entry:
1678 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1679 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
1680 // CHECK1-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
1681 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
1682 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1683 // CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
1684 // CHECK1-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
1685 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1686 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
1687 // CHECK1-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
1688 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
1689 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1690 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
1691 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
1692 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
1693 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
1694 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1695 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[CONV3]], align 4
1696 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
1697 // CHECK1-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
1698 // CHECK1-NEXT:    [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
1699 // CHECK1-NEXT:    store i16 [[TMP3]], i16* [[CONV4]], align 2
1700 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
1701 // CHECK1-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 1
1702 // CHECK1-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
1703 // CHECK1-NEXT:    store i8 [[TMP5]], i8* [[CONV5]], align 1
1704 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
1705 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
1706 // CHECK1-NEXT:    ret void
1707 //
1708 //
1709 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..13
1710 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
1711 // CHECK1-NEXT:  entry:
1712 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1713 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1714 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1715 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
1716 // CHECK1-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
1717 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
1718 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1719 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1720 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1721 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1722 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1723 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
1724 // CHECK1-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
1725 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
1726 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1727 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
1728 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
1729 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
1730 // CHECK1-NEXT:    ret void
1731 //
1732 //
1733 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
1734 // CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
1735 // CHECK1-NEXT:  entry:
1736 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1737 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
1738 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
1739 // CHECK1-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1740 // CHECK1-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
1741 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1742 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
1743 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
1744 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1745 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
1746 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
1747 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
1748 // CHECK1-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1749 // CHECK1-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
1750 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
1751 // CHECK1-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
1752 // CHECK1-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
1753 // CHECK1-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
1754 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
1755 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
1756 // CHECK1-NEXT:    ret void
1757 //
1758 //
1759 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..16
1760 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
1761 // CHECK1-NEXT:  entry:
1762 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1763 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1764 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1765 // CHECK1-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
1766 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
1767 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
1768 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i64, align 8
1769 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
1770 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
1771 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
1772 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1773 // CHECK1-NEXT:    [[I:%.*]] = alloca i64, align 8
1774 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1775 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1776 // CHECK1-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1777 // CHECK1-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
1778 // CHECK1-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
1779 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1780 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
1781 // CHECK1-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
1782 // CHECK1-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
1783 // CHECK1-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
1784 // CHECK1-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
1785 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1786 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1787 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1788 // CHECK1-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
1789 // CHECK1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
1790 // CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
1791 // CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1792 // CHECK1:       cond.true:
1793 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1794 // CHECK1:       cond.false:
1795 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
1796 // CHECK1-NEXT:    br label [[COND_END]]
1797 // CHECK1:       cond.end:
1798 // CHECK1-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1799 // CHECK1-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
1800 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
1801 // CHECK1-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
1802 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1803 // CHECK1:       omp.inner.for.cond:
1804 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41
1805 // CHECK1-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !41
1806 // CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
1807 // CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1808 // CHECK1:       omp.inner.for.body:
1809 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41
1810 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
1811 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
1812 // CHECK1-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !41
1813 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !41
1814 // CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
1815 // CHECK1-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !41
1816 // CHECK1-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !41
1817 // CHECK1-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
1818 // CHECK1-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
1819 // CHECK1-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
1820 // CHECK1-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !41
1821 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
1822 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !41
1823 // CHECK1-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
1824 // CHECK1-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !41
1825 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1826 // CHECK1:       omp.body.continue:
1827 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1828 // CHECK1:       omp.inner.for.inc:
1829 // CHECK1-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41
1830 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
1831 // CHECK1-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41
1832 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
1833 // CHECK1:       omp.inner.for.end:
1834 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1835 // CHECK1:       omp.loop.exit:
1836 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1837 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1838 // CHECK1-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
1839 // CHECK1-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1840 // CHECK1:       .omp.final.then:
1841 // CHECK1-NEXT:    store i64 11, i64* [[I]], align 8
1842 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1843 // CHECK1:       .omp.final.done:
1844 // CHECK1-NEXT:    ret void
1845 //
1846 //
1847 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1848 // CHECK1-SAME: () #[[ATTR8:[0-9]+]] {
1849 // CHECK1-NEXT:  entry:
1850 // CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
1851 // CHECK1-NEXT:    ret void
1852 //
1853 //
1854 // CHECK2-LABEL: define {{[^@]+}}@_Z7get_valv
1855 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
1856 // CHECK2-NEXT:  entry:
1857 // CHECK2-NEXT:    ret i64 0
1858 //
1859 //
1860 // CHECK2-LABEL: define {{[^@]+}}@_Z3fooi
1861 // CHECK2-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
1862 // CHECK2-NEXT:  entry:
1863 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1864 // CHECK2-NEXT:    [[A:%.*]] = alloca i32, align 4
1865 // CHECK2-NEXT:    [[AA:%.*]] = alloca i16, align 2
1866 // CHECK2-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
1867 // CHECK2-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
1868 // CHECK2-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
1869 // CHECK2-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
1870 // CHECK2-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
1871 // CHECK2-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
1872 // CHECK2-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
1873 // CHECK2-NEXT:    [[K:%.*]] = alloca i64, align 8
1874 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1875 // CHECK2-NEXT:    [[K_CASTED:%.*]] = alloca i64, align 8
1876 // CHECK2-NEXT:    [[LIN:%.*]] = alloca i32, align 4
1877 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
1878 // CHECK2-NEXT:    [[LIN_CASTED:%.*]] = alloca i64, align 8
1879 // CHECK2-NEXT:    [[A_CASTED4:%.*]] = alloca i64, align 8
1880 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
1881 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
1882 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
1883 // CHECK2-NEXT:    [[A_CASTED6:%.*]] = alloca i64, align 8
1884 // CHECK2-NEXT:    [[AA_CASTED8:%.*]] = alloca i64, align 8
1885 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [2 x i8*], align 8
1886 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS11:%.*]] = alloca [2 x i8*], align 8
1887 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [2 x i8*], align 8
1888 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1889 // CHECK2-NEXT:    [[A_CASTED15:%.*]] = alloca i64, align 8
1890 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
1891 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8
1892 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8
1893 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8
1894 // CHECK2-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8
1895 // CHECK2-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
1896 // CHECK2-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1897 // CHECK2-NEXT:    store i32 0, i32* [[A]], align 4
1898 // CHECK2-NEXT:    store i16 0, i16* [[AA]], align 2
1899 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
1900 // CHECK2-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
1901 // CHECK2-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
1902 // CHECK2-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
1903 // CHECK2-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
1904 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
1905 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
1906 // CHECK2-NEXT:    [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
1907 // CHECK2-NEXT:    [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
1908 // CHECK2-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
1909 // CHECK2-NEXT:    store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
1910 // CHECK2-NEXT:    [[TMP7:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
1911 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.kmp_task_t_with_privates*
1912 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP8]], i32 0, i32 0
1913 // CHECK2-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP7]])
1914 // CHECK2-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
1915 // CHECK2-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
1916 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[A]], align 4
1917 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1918 // CHECK2-NEXT:    store i32 [[TMP11]], i32* [[CONV]], align 4
1919 // CHECK2-NEXT:    [[TMP12:%.*]] = load i64, i64* [[A_CASTED]], align 8
1920 // CHECK2-NEXT:    [[TMP13:%.*]] = load i64, i64* [[K]], align 8
1921 // CHECK2-NEXT:    store i64 [[TMP13]], i64* [[K_CASTED]], align 8
1922 // CHECK2-NEXT:    [[TMP14:%.*]] = load i64, i64* [[K_CASTED]], align 8
1923 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i64 [[TMP12]], i64 [[TMP14]]) #[[ATTR4:[0-9]+]]
1924 // CHECK2-NEXT:    store i32 12, i32* [[LIN]], align 4
1925 // CHECK2-NEXT:    [[TMP15:%.*]] = load i16, i16* [[AA]], align 2
1926 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
1927 // CHECK2-NEXT:    store i16 [[TMP15]], i16* [[CONV2]], align 2
1928 // CHECK2-NEXT:    [[TMP16:%.*]] = load i64, i64* [[AA_CASTED]], align 8
1929 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[LIN]], align 4
1930 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
1931 // CHECK2-NEXT:    store i32 [[TMP17]], i32* [[CONV3]], align 4
1932 // CHECK2-NEXT:    [[TMP18:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
1933 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[A]], align 4
1934 // CHECK2-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED4]] to i32*
1935 // CHECK2-NEXT:    store i32 [[TMP19]], i32* [[CONV5]], align 4
1936 // CHECK2-NEXT:    [[TMP20:%.*]] = load i64, i64* [[A_CASTED4]], align 8
1937 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1938 // CHECK2-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
1939 // CHECK2-NEXT:    store i64 [[TMP16]], i64* [[TMP22]], align 8
1940 // CHECK2-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1941 // CHECK2-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64*
1942 // CHECK2-NEXT:    store i64 [[TMP16]], i64* [[TMP24]], align 8
1943 // CHECK2-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1944 // CHECK2-NEXT:    store i8* null, i8** [[TMP25]], align 8
1945 // CHECK2-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
1946 // CHECK2-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
1947 // CHECK2-NEXT:    store i64 [[TMP18]], i64* [[TMP27]], align 8
1948 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
1949 // CHECK2-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
1950 // CHECK2-NEXT:    store i64 [[TMP18]], i64* [[TMP29]], align 8
1951 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
1952 // CHECK2-NEXT:    store i8* null, i8** [[TMP30]], align 8
1953 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
1954 // CHECK2-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
1955 // CHECK2-NEXT:    store i64 [[TMP20]], i64* [[TMP32]], align 8
1956 // CHECK2-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
1957 // CHECK2-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64*
1958 // CHECK2-NEXT:    store i64 [[TMP20]], i64* [[TMP34]], align 8
1959 // CHECK2-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
1960 // CHECK2-NEXT:    store i8* null, i8** [[TMP35]], align 8
1961 // CHECK2-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1962 // CHECK2-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1963 // CHECK2-NEXT:    [[TMP38:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
1964 // CHECK2-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
1965 // CHECK2-NEXT:    br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1966 // CHECK2:       omp_offload.failed:
1967 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]]) #[[ATTR4]]
1968 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
1969 // CHECK2:       omp_offload.cont:
1970 // CHECK2-NEXT:    [[TMP40:%.*]] = load i32, i32* [[A]], align 4
1971 // CHECK2-NEXT:    [[CONV7:%.*]] = bitcast i64* [[A_CASTED6]] to i32*
1972 // CHECK2-NEXT:    store i32 [[TMP40]], i32* [[CONV7]], align 4
1973 // CHECK2-NEXT:    [[TMP41:%.*]] = load i64, i64* [[A_CASTED6]], align 8
1974 // CHECK2-NEXT:    [[TMP42:%.*]] = load i16, i16* [[AA]], align 2
1975 // CHECK2-NEXT:    [[CONV9:%.*]] = bitcast i64* [[AA_CASTED8]] to i16*
1976 // CHECK2-NEXT:    store i16 [[TMP42]], i16* [[CONV9]], align 2
1977 // CHECK2-NEXT:    [[TMP43:%.*]] = load i64, i64* [[AA_CASTED8]], align 8
1978 // CHECK2-NEXT:    [[TMP44:%.*]] = load i32, i32* [[N_ADDR]], align 4
1979 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP44]], 10
1980 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
1981 // CHECK2:       omp_if.then:
1982 // CHECK2-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
1983 // CHECK2-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64*
1984 // CHECK2-NEXT:    store i64 [[TMP41]], i64* [[TMP46]], align 8
1985 // CHECK2-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
1986 // CHECK2-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i64*
1987 // CHECK2-NEXT:    store i64 [[TMP41]], i64* [[TMP48]], align 8
1988 // CHECK2-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
1989 // CHECK2-NEXT:    store i8* null, i8** [[TMP49]], align 8
1990 // CHECK2-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 1
1991 // CHECK2-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i64*
1992 // CHECK2-NEXT:    store i64 [[TMP43]], i64* [[TMP51]], align 8
1993 // CHECK2-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 1
1994 // CHECK2-NEXT:    [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64*
1995 // CHECK2-NEXT:    store i64 [[TMP43]], i64* [[TMP53]], align 8
1996 // CHECK2-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 1
1997 // CHECK2-NEXT:    store i8* null, i8** [[TMP54]], align 8
1998 // CHECK2-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
1999 // CHECK2-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
2000 // CHECK2-NEXT:    [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
2001 // CHECK2-NEXT:    [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0
2002 // CHECK2-NEXT:    br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
2003 // CHECK2:       omp_offload.failed13:
2004 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]]
2005 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT14]]
2006 // CHECK2:       omp_offload.cont14:
2007 // CHECK2-NEXT:    br label [[OMP_IF_END:%.*]]
2008 // CHECK2:       omp_if.else:
2009 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]]
2010 // CHECK2-NEXT:    br label [[OMP_IF_END]]
2011 // CHECK2:       omp_if.end:
2012 // CHECK2-NEXT:    [[TMP59:%.*]] = load i32, i32* [[A]], align 4
2013 // CHECK2-NEXT:    store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR_]], align 4
2014 // CHECK2-NEXT:    [[TMP60:%.*]] = load i32, i32* [[A]], align 4
2015 // CHECK2-NEXT:    [[CONV16:%.*]] = bitcast i64* [[A_CASTED15]] to i32*
2016 // CHECK2-NEXT:    store i32 [[TMP60]], i32* [[CONV16]], align 4
2017 // CHECK2-NEXT:    [[TMP61:%.*]] = load i64, i64* [[A_CASTED15]], align 8
2018 // CHECK2-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2019 // CHECK2-NEXT:    [[CONV17:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
2020 // CHECK2-NEXT:    store i32 [[TMP62]], i32* [[CONV17]], align 4
2021 // CHECK2-NEXT:    [[TMP63:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
2022 // CHECK2-NEXT:    [[TMP64:%.*]] = load i32, i32* [[N_ADDR]], align 4
2023 // CHECK2-NEXT:    [[CMP18:%.*]] = icmp sgt i32 [[TMP64]], 20
2024 // CHECK2-NEXT:    br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE25:%.*]]
2025 // CHECK2:       omp_if.then19:
2026 // CHECK2-NEXT:    [[TMP65:%.*]] = mul nuw i64 [[TMP2]], 4
2027 // CHECK2-NEXT:    [[TMP66:%.*]] = mul nuw i64 5, [[TMP5]]
2028 // CHECK2-NEXT:    [[TMP67:%.*]] = mul nuw i64 [[TMP66]], 8
2029 // CHECK2-NEXT:    [[TMP68:%.*]] = bitcast [10 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
2030 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP68]], i8* align 8 bitcast ([10 x i64]* @.offload_sizes.8 to i8*), i64 80, i1 false)
2031 // CHECK2-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
2032 // CHECK2-NEXT:    [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64*
2033 // CHECK2-NEXT:    store i64 [[TMP61]], i64* [[TMP70]], align 8
2034 // CHECK2-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
2035 // CHECK2-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
2036 // CHECK2-NEXT:    store i64 [[TMP61]], i64* [[TMP72]], align 8
2037 // CHECK2-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
2038 // CHECK2-NEXT:    store i8* null, i8** [[TMP73]], align 8
2039 // CHECK2-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
2040 // CHECK2-NEXT:    [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]**
2041 // CHECK2-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 8
2042 // CHECK2-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
2043 // CHECK2-NEXT:    [[TMP77:%.*]] = bitcast i8** [[TMP76]] to [10 x float]**
2044 // CHECK2-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP77]], align 8
2045 // CHECK2-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
2046 // CHECK2-NEXT:    store i8* null, i8** [[TMP78]], align 8
2047 // CHECK2-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
2048 // CHECK2-NEXT:    [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i64*
2049 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[TMP80]], align 8
2050 // CHECK2-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
2051 // CHECK2-NEXT:    [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i64*
2052 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[TMP82]], align 8
2053 // CHECK2-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
2054 // CHECK2-NEXT:    store i8* null, i8** [[TMP83]], align 8
2055 // CHECK2-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
2056 // CHECK2-NEXT:    [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float**
2057 // CHECK2-NEXT:    store float* [[VLA]], float** [[TMP85]], align 8
2058 // CHECK2-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
2059 // CHECK2-NEXT:    [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float**
2060 // CHECK2-NEXT:    store float* [[VLA]], float** [[TMP87]], align 8
2061 // CHECK2-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
2062 // CHECK2-NEXT:    store i64 [[TMP65]], i64* [[TMP88]], align 8
2063 // CHECK2-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
2064 // CHECK2-NEXT:    store i8* null, i8** [[TMP89]], align 8
2065 // CHECK2-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
2066 // CHECK2-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]**
2067 // CHECK2-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 8
2068 // CHECK2-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
2069 // CHECK2-NEXT:    [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]**
2070 // CHECK2-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 8
2071 // CHECK2-NEXT:    [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
2072 // CHECK2-NEXT:    store i8* null, i8** [[TMP94]], align 8
2073 // CHECK2-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5
2074 // CHECK2-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i64*
2075 // CHECK2-NEXT:    store i64 5, i64* [[TMP96]], align 8
2076 // CHECK2-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5
2077 // CHECK2-NEXT:    [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64*
2078 // CHECK2-NEXT:    store i64 5, i64* [[TMP98]], align 8
2079 // CHECK2-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5
2080 // CHECK2-NEXT:    store i8* null, i8** [[TMP99]], align 8
2081 // CHECK2-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6
2082 // CHECK2-NEXT:    [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64*
2083 // CHECK2-NEXT:    store i64 [[TMP5]], i64* [[TMP101]], align 8
2084 // CHECK2-NEXT:    [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6
2085 // CHECK2-NEXT:    [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i64*
2086 // CHECK2-NEXT:    store i64 [[TMP5]], i64* [[TMP103]], align 8
2087 // CHECK2-NEXT:    [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6
2088 // CHECK2-NEXT:    store i8* null, i8** [[TMP104]], align 8
2089 // CHECK2-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7
2090 // CHECK2-NEXT:    [[TMP106:%.*]] = bitcast i8** [[TMP105]] to double**
2091 // CHECK2-NEXT:    store double* [[VLA1]], double** [[TMP106]], align 8
2092 // CHECK2-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7
2093 // CHECK2-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to double**
2094 // CHECK2-NEXT:    store double* [[VLA1]], double** [[TMP108]], align 8
2095 // CHECK2-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
2096 // CHECK2-NEXT:    store i64 [[TMP67]], i64* [[TMP109]], align 8
2097 // CHECK2-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7
2098 // CHECK2-NEXT:    store i8* null, i8** [[TMP110]], align 8
2099 // CHECK2-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8
2100 // CHECK2-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to %struct.TT**
2101 // CHECK2-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP112]], align 8
2102 // CHECK2-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8
2103 // CHECK2-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to %struct.TT**
2104 // CHECK2-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP114]], align 8
2105 // CHECK2-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8
2106 // CHECK2-NEXT:    store i8* null, i8** [[TMP115]], align 8
2107 // CHECK2-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9
2108 // CHECK2-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i64*
2109 // CHECK2-NEXT:    store i64 [[TMP63]], i64* [[TMP117]], align 8
2110 // CHECK2-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9
2111 // CHECK2-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i64*
2112 // CHECK2-NEXT:    store i64 [[TMP63]], i64* [[TMP119]], align 8
2113 // CHECK2-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9
2114 // CHECK2-NEXT:    store i8* null, i8** [[TMP120]], align 8
2115 // CHECK2-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
2116 // CHECK2-NEXT:    [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
2117 // CHECK2-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
2118 // CHECK2-NEXT:    [[TMP124:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP121]], i8** [[TMP122]], i64* [[TMP123]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
2119 // CHECK2-NEXT:    [[TMP125:%.*]] = icmp ne i32 [[TMP124]], 0
2120 // CHECK2-NEXT:    br i1 [[TMP125]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]]
2121 // CHECK2:       omp_offload.failed23:
2122 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]]
2123 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT24]]
2124 // CHECK2:       omp_offload.cont24:
2125 // CHECK2-NEXT:    br label [[OMP_IF_END26:%.*]]
2126 // CHECK2:       omp_if.else25:
2127 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]]
2128 // CHECK2-NEXT:    br label [[OMP_IF_END26]]
2129 // CHECK2:       omp_if.end26:
2130 // CHECK2-NEXT:    [[TMP126:%.*]] = load i32, i32* [[A]], align 4
2131 // CHECK2-NEXT:    [[TMP127:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
2132 // CHECK2-NEXT:    call void @llvm.stackrestore(i8* [[TMP127]])
2133 // CHECK2-NEXT:    ret i32 [[TMP126]]
2134 //
2135 //
2136 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
2137 // CHECK2-SAME: () #[[ATTR2:[0-9]+]] {
2138 // CHECK2-NEXT:  entry:
2139 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
2140 // CHECK2-NEXT:    ret void
2141 //
2142 //
2143 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
2144 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
2145 // CHECK2-NEXT:  entry:
2146 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2147 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2148 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2149 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2150 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2151 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2152 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2153 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2154 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
2155 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2156 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2157 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2158 // CHECK2-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
2159 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2160 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2161 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2162 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2163 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2164 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2165 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
2166 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2167 // CHECK2:       cond.true:
2168 // CHECK2-NEXT:    br label [[COND_END:%.*]]
2169 // CHECK2:       cond.false:
2170 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2171 // CHECK2-NEXT:    br label [[COND_END]]
2172 // CHECK2:       cond.end:
2173 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
2174 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2175 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2176 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
2177 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2178 // CHECK2:       omp.inner.for.cond:
2179 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
2180 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
2181 // CHECK2-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
2182 // CHECK2-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2183 // CHECK2:       omp.inner.for.body:
2184 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
2185 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
2186 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
2187 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10
2188 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2189 // CHECK2:       omp.body.continue:
2190 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2191 // CHECK2:       omp.inner.for.inc:
2192 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
2193 // CHECK2-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
2194 // CHECK2-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
2195 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
2196 // CHECK2:       omp.inner.for.end:
2197 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2198 // CHECK2:       omp.loop.exit:
2199 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
2200 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2201 // CHECK2-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
2202 // CHECK2-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2203 // CHECK2:       .omp.final.then:
2204 // CHECK2-NEXT:    store i32 33, i32* [[I]], align 4
2205 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2206 // CHECK2:       .omp.final.done:
2207 // CHECK2-NEXT:    ret void
2208 //
2209 //
2210 // CHECK2-LABEL: define {{[^@]+}}@.omp_task_entry.
2211 // CHECK2-SAME: (i32 noundef signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
2212 // CHECK2-NEXT:  entry:
2213 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
2214 // CHECK2-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
2215 // CHECK2-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
2216 // CHECK2-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
2217 // CHECK2-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
2218 // CHECK2-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
2219 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
2220 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
2221 // CHECK2-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
2222 // CHECK2-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
2223 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
2224 // CHECK2-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
2225 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
2226 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
2227 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
2228 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
2229 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
2230 // CHECK2-NEXT:    [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
2231 // CHECK2-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
2232 // CHECK2-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
2233 // CHECK2-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
2234 // CHECK2-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
2235 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25
2236 // CHECK2-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !25
2237 // CHECK2-NEXT:    store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25
2238 // CHECK2-NEXT:    store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25
2239 // CHECK2-NEXT:    store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !25
2240 // CHECK2-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
2241 // CHECK2-NEXT:    [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
2242 // CHECK2-NEXT:    [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
2243 // CHECK2-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
2244 // CHECK2-NEXT:    br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
2245 // CHECK2:       omp_offload.failed.i:
2246 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
2247 // CHECK2-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
2248 // CHECK2:       .omp_outlined..1.exit:
2249 // CHECK2-NEXT:    ret i32 0
2250 //
2251 //
2252 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
2253 // CHECK2-SAME: (i64 noundef [[A:%.*]], i64 noundef [[K:%.*]]) #[[ATTR3]] {
2254 // CHECK2-NEXT:  entry:
2255 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2256 // CHECK2-NEXT:    [[K_ADDR:%.*]] = alloca i64, align 8
2257 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
2258 // CHECK2-NEXT:    [[K_CASTED:%.*]] = alloca i64, align 8
2259 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2260 // CHECK2-NEXT:    store i64 [[K]], i64* [[K_ADDR]], align 8
2261 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2262 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
2263 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
2264 // CHECK2-NEXT:    store i32 [[TMP0]], i32* [[CONV1]], align 4
2265 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
2266 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[K_ADDR]], align 8
2267 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[K_CASTED]], align 8
2268 // CHECK2-NEXT:    [[TMP3:%.*]] = load i64, i64* [[K_CASTED]], align 8
2269 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
2270 // CHECK2-NEXT:    ret void
2271 //
2272 //
2273 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..2
2274 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[K:%.*]]) #[[ATTR3]] {
2275 // CHECK2-NEXT:  entry:
2276 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2277 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2278 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2279 // CHECK2-NEXT:    [[K_ADDR:%.*]] = alloca i64, align 8
2280 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2281 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2282 // CHECK2-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
2283 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2284 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2285 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2286 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2287 // CHECK2-NEXT:    [[I:%.*]] = alloca i32, align 4
2288 // CHECK2-NEXT:    [[K1:%.*]] = alloca i64, align 8
2289 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2290 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2291 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2292 // CHECK2-NEXT:    store i64 [[K]], i64* [[K_ADDR]], align 8
2293 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2294 // CHECK2-NEXT:    [[TMP0:%.*]] = load i64, i64* [[K_ADDR]], align 8
2295 // CHECK2-NEXT:    store i64 [[TMP0]], i64* [[DOTLINEAR_START]], align 8
2296 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2297 // CHECK2-NEXT:    store i32 8, i32* [[DOTOMP_UB]], align 4
2298 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2299 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2300 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2301 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2302 // CHECK2-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]])
2303 // CHECK2-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 35, i32 0, i32 8, i32 1, i32 1)
2304 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
2305 // CHECK2:       omp.dispatch.cond:
2306 // CHECK2-NEXT:    [[TMP3:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
2307 // CHECK2-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0
2308 // CHECK2-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2309 // CHECK2:       omp.dispatch.body:
2310 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2311 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
2312 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2313 // CHECK2:       omp.inner.for.cond:
2314 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
2315 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26
2316 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
2317 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2318 // CHECK2:       omp.inner.for.body:
2319 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
2320 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
2321 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
2322 // CHECK2-NEXT:    store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !26
2323 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !26
2324 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
2325 // CHECK2-NEXT:    [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3
2326 // CHECK2-NEXT:    [[CONV3:%.*]] = sext i32 [[MUL2]] to i64
2327 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]]
2328 // CHECK2-NEXT:    store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !26
2329 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !26
2330 // CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1
2331 // CHECK2-NEXT:    store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group !26
2332 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2333 // CHECK2:       omp.body.continue:
2334 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2335 // CHECK2:       omp.inner.for.inc:
2336 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
2337 // CHECK2-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1
2338 // CHECK2-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
2339 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
2340 // CHECK2:       omp.inner.for.end:
2341 // CHECK2-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
2342 // CHECK2:       omp.dispatch.inc:
2343 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND]]
2344 // CHECK2:       omp.dispatch.end:
2345 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2346 // CHECK2-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
2347 // CHECK2-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2348 // CHECK2:       .omp.final.then:
2349 // CHECK2-NEXT:    store i32 1, i32* [[I]], align 4
2350 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2351 // CHECK2:       .omp.final.done:
2352 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2353 // CHECK2-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2354 // CHECK2-NEXT:    br i1 [[TMP15]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
2355 // CHECK2:       .omp.linear.pu:
2356 // CHECK2-NEXT:    [[TMP16:%.*]] = load i64, i64* [[K1]], align 8
2357 // CHECK2-NEXT:    store i64 [[TMP16]], i64* [[K_ADDR]], align 8
2358 // CHECK2-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
2359 // CHECK2:       .omp.linear.pu.done:
2360 // CHECK2-NEXT:    ret void
2361 //
2362 //
2363 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
2364 // CHECK2-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR2]] {
2365 // CHECK2-NEXT:  entry:
2366 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
2367 // CHECK2-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
2368 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2369 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
2370 // CHECK2-NEXT:    [[LIN_CASTED:%.*]] = alloca i64, align 8
2371 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
2372 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
2373 // CHECK2-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
2374 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2375 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
2376 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
2377 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2378 // CHECK2-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
2379 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
2380 // CHECK2-NEXT:    store i16 [[TMP0]], i16* [[CONV3]], align 2
2381 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
2382 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
2383 // CHECK2-NEXT:    [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
2384 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[CONV4]], align 4
2385 // CHECK2-NEXT:    [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
2386 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 4
2387 // CHECK2-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
2388 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[CONV5]], align 4
2389 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
2390 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
2391 // CHECK2-NEXT:    ret void
2392 //
2393 //
2394 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
2395 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
2396 // CHECK2-NEXT:  entry:
2397 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2398 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2399 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
2400 // CHECK2-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
2401 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2402 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
2403 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i64, align 8
2404 // CHECK2-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
2405 // CHECK2-NEXT:    [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
2406 // CHECK2-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
2407 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
2408 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
2409 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
2410 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2411 // CHECK2-NEXT:    [[IT:%.*]] = alloca i64, align 8
2412 // CHECK2-NEXT:    [[LIN4:%.*]] = alloca i32, align 4
2413 // CHECK2-NEXT:    [[A5:%.*]] = alloca i32, align 4
2414 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2415 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2416 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
2417 // CHECK2-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
2418 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2419 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
2420 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
2421 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2422 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 4
2423 // CHECK2-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
2424 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 4
2425 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
2426 // CHECK2-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
2427 // CHECK2-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
2428 // CHECK2-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
2429 // CHECK2-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
2430 // CHECK2-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
2431 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2432 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2433 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
2434 // CHECK2-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
2435 // CHECK2-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
2436 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
2437 // CHECK2-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
2438 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2439 // CHECK2:       cond.true:
2440 // CHECK2-NEXT:    br label [[COND_END:%.*]]
2441 // CHECK2:       cond.false:
2442 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
2443 // CHECK2-NEXT:    br label [[COND_END]]
2444 // CHECK2:       cond.end:
2445 // CHECK2-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
2446 // CHECK2-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
2447 // CHECK2-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
2448 // CHECK2-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
2449 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2450 // CHECK2:       omp.inner.for.cond:
2451 // CHECK2-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
2452 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !29
2453 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
2454 // CHECK2-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2455 // CHECK2:       omp.inner.for.body:
2456 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
2457 // CHECK2-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
2458 // CHECK2-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
2459 // CHECK2-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !29
2460 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !29
2461 // CHECK2-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
2462 // CHECK2-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
2463 // CHECK2-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29
2464 // CHECK2-NEXT:    [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
2465 // CHECK2-NEXT:    [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
2466 // CHECK2-NEXT:    [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
2467 // CHECK2-NEXT:    store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !29
2468 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !29
2469 // CHECK2-NEXT:    [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
2470 // CHECK2-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
2471 // CHECK2-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29
2472 // CHECK2-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
2473 // CHECK2-NEXT:    [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
2474 // CHECK2-NEXT:    [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
2475 // CHECK2-NEXT:    store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !29
2476 // CHECK2-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !29
2477 // CHECK2-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
2478 // CHECK2-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
2479 // CHECK2-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
2480 // CHECK2-NEXT:    store i16 [[CONV16]], i16* [[CONV]], align 2, !llvm.access.group !29
2481 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2482 // CHECK2:       omp.body.continue:
2483 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2484 // CHECK2:       omp.inner.for.inc:
2485 // CHECK2-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
2486 // CHECK2-NEXT:    [[ADD17:%.*]] = add i64 [[TMP17]], 1
2487 // CHECK2-NEXT:    store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
2488 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
2489 // CHECK2:       omp.inner.for.end:
2490 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2491 // CHECK2:       omp.loop.exit:
2492 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
2493 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2494 // CHECK2-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
2495 // CHECK2-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2496 // CHECK2:       .omp.final.then:
2497 // CHECK2-NEXT:    store i64 400, i64* [[IT]], align 8
2498 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2499 // CHECK2:       .omp.final.done:
2500 // CHECK2-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2501 // CHECK2-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
2502 // CHECK2-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
2503 // CHECK2:       .omp.linear.pu:
2504 // CHECK2-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4
2505 // CHECK2-NEXT:    store i32 [[TMP22]], i32* [[CONV1]], align 4
2506 // CHECK2-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A5]], align 4
2507 // CHECK2-NEXT:    store i32 [[TMP23]], i32* [[CONV2]], align 4
2508 // CHECK2-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
2509 // CHECK2:       .omp.linear.pu.done:
2510 // CHECK2-NEXT:    ret void
2511 //
2512 //
2513 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
2514 // CHECK2-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR2]] {
2515 // CHECK2-NEXT:  entry:
2516 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2517 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
2518 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
2519 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
2520 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2521 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
2522 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2523 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
2524 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
2525 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
2526 // CHECK2-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
2527 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
2528 // CHECK2-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
2529 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
2530 // CHECK2-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
2531 // CHECK2-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
2532 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
2533 // CHECK2-NEXT:    ret void
2534 //
2535 //
2536 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4
2537 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3]] {
2538 // CHECK2-NEXT:  entry:
2539 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2540 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2541 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2542 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
2543 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2544 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i16, align 2
2545 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2546 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2547 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2548 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2549 // CHECK2-NEXT:    [[IT:%.*]] = alloca i16, align 2
2550 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2551 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2552 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2553 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
2554 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2555 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
2556 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2557 // CHECK2-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
2558 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2559 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2560 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2561 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2562 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2563 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2564 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
2565 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2566 // CHECK2:       cond.true:
2567 // CHECK2-NEXT:    br label [[COND_END:%.*]]
2568 // CHECK2:       cond.false:
2569 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2570 // CHECK2-NEXT:    br label [[COND_END]]
2571 // CHECK2:       cond.end:
2572 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
2573 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2574 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2575 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
2576 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2577 // CHECK2:       omp.inner.for.cond:
2578 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
2579 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
2580 // CHECK2-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
2581 // CHECK2-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2582 // CHECK2:       omp.inner.for.body:
2583 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
2584 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
2585 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
2586 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
2587 // CHECK2-NEXT:    store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !32
2588 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !32
2589 // CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
2590 // CHECK2-NEXT:    store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group !32
2591 // CHECK2-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !32
2592 // CHECK2-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
2593 // CHECK2-NEXT:    [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
2594 // CHECK2-NEXT:    [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
2595 // CHECK2-NEXT:    store i16 [[CONV7]], i16* [[CONV1]], align 2, !llvm.access.group !32
2596 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2597 // CHECK2:       omp.body.continue:
2598 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2599 // CHECK2:       omp.inner.for.inc:
2600 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
2601 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
2602 // CHECK2-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
2603 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
2604 // CHECK2:       omp.inner.for.end:
2605 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2606 // CHECK2:       omp.loop.exit:
2607 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
2608 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2609 // CHECK2-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
2610 // CHECK2-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2611 // CHECK2:       .omp.final.then:
2612 // CHECK2-NEXT:    store i16 22, i16* [[IT]], align 2
2613 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2614 // CHECK2:       .omp.final.done:
2615 // CHECK2-NEXT:    ret void
2616 //
2617 //
2618 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
2619 // CHECK2-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
2620 // CHECK2-NEXT:  entry:
2621 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2622 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
2623 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
2624 // CHECK2-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
2625 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
2626 // CHECK2-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
2627 // CHECK2-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
2628 // CHECK2-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
2629 // CHECK2-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
2630 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
2631 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
2632 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
2633 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2634 // CHECK2-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
2635 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
2636 // CHECK2-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
2637 // CHECK2-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
2638 // CHECK2-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
2639 // CHECK2-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
2640 // CHECK2-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
2641 // CHECK2-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
2642 // CHECK2-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
2643 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2644 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
2645 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
2646 // CHECK2-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
2647 // CHECK2-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
2648 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
2649 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
2650 // CHECK2-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
2651 // CHECK2-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
2652 // CHECK2-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
2653 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
2654 // CHECK2-NEXT:    [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
2655 // CHECK2-NEXT:    store i32 [[TMP8]], i32* [[CONV6]], align 4
2656 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
2657 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 4
2658 // CHECK2-NEXT:    [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
2659 // CHECK2-NEXT:    store i32 [[TMP10]], i32* [[CONV7]], align 4
2660 // CHECK2-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
2661 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
2662 // CHECK2-NEXT:    ret void
2663 //
2664 //
2665 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7
2666 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
2667 // CHECK2-NEXT:  entry:
2668 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2669 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2670 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2671 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
2672 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
2673 // CHECK2-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
2674 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
2675 // CHECK2-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
2676 // CHECK2-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
2677 // CHECK2-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
2678 // CHECK2-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
2679 // CHECK2-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
2680 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2681 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i8, align 1
2682 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2683 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2684 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2685 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2686 // CHECK2-NEXT:    [[IT:%.*]] = alloca i8, align 1
2687 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2688 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2689 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2690 // CHECK2-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
2691 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
2692 // CHECK2-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
2693 // CHECK2-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
2694 // CHECK2-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
2695 // CHECK2-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
2696 // CHECK2-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
2697 // CHECK2-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
2698 // CHECK2-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
2699 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2700 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
2701 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
2702 // CHECK2-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
2703 // CHECK2-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
2704 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
2705 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
2706 // CHECK2-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
2707 // CHECK2-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
2708 // CHECK2-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
2709 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2710 // CHECK2-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
2711 // CHECK2-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2712 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2713 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 4
2714 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2715 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
2716 // CHECK2-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
2717 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
2718 // CHECK2:       omp.dispatch.cond:
2719 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2720 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
2721 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2722 // CHECK2:       cond.true:
2723 // CHECK2-NEXT:    br label [[COND_END:%.*]]
2724 // CHECK2:       cond.false:
2725 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2726 // CHECK2-NEXT:    br label [[COND_END]]
2727 // CHECK2:       cond.end:
2728 // CHECK2-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
2729 // CHECK2-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2730 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2731 // CHECK2-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
2732 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2733 // CHECK2-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2734 // CHECK2-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
2735 // CHECK2-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2736 // CHECK2:       omp.dispatch.body:
2737 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2738 // CHECK2:       omp.inner.for.cond:
2739 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
2740 // CHECK2-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
2741 // CHECK2-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
2742 // CHECK2-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2743 // CHECK2:       omp.inner.for.body:
2744 // CHECK2-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
2745 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
2746 // CHECK2-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
2747 // CHECK2-NEXT:    [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
2748 // CHECK2-NEXT:    store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !35
2749 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !35
2750 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
2751 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[CONV]], align 4, !llvm.access.group !35
2752 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
2753 // CHECK2-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !35
2754 // CHECK2-NEXT:    [[CONV9:%.*]] = fpext float [[TMP20]] to double
2755 // CHECK2-NEXT:    [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
2756 // CHECK2-NEXT:    [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
2757 // CHECK2-NEXT:    store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !35
2758 // CHECK2-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
2759 // CHECK2-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !35
2760 // CHECK2-NEXT:    [[CONV13:%.*]] = fpext float [[TMP21]] to double
2761 // CHECK2-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
2762 // CHECK2-NEXT:    [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
2763 // CHECK2-NEXT:    store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !35
2764 // CHECK2-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
2765 // CHECK2-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
2766 // CHECK2-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !35
2767 // CHECK2-NEXT:    [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
2768 // CHECK2-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !35
2769 // CHECK2-NEXT:    [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
2770 // CHECK2-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
2771 // CHECK2-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
2772 // CHECK2-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !35
2773 // CHECK2-NEXT:    [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
2774 // CHECK2-NEXT:    store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !35
2775 // CHECK2-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
2776 // CHECK2-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !35
2777 // CHECK2-NEXT:    [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
2778 // CHECK2-NEXT:    store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !35
2779 // CHECK2-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
2780 // CHECK2-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !35
2781 // CHECK2-NEXT:    [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
2782 // CHECK2-NEXT:    [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
2783 // CHECK2-NEXT:    [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
2784 // CHECK2-NEXT:    store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !35
2785 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2786 // CHECK2:       omp.body.continue:
2787 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2788 // CHECK2:       omp.inner.for.inc:
2789 // CHECK2-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
2790 // CHECK2-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
2791 // CHECK2-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
2792 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
2793 // CHECK2:       omp.inner.for.end:
2794 // CHECK2-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
2795 // CHECK2:       omp.dispatch.inc:
2796 // CHECK2-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2797 // CHECK2-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2798 // CHECK2-NEXT:    [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
2799 // CHECK2-NEXT:    store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
2800 // CHECK2-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2801 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2802 // CHECK2-NEXT:    [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
2803 // CHECK2-NEXT:    store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
2804 // CHECK2-NEXT:    br label [[OMP_DISPATCH_COND]]
2805 // CHECK2:       omp.dispatch.end:
2806 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
2807 // CHECK2-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2808 // CHECK2-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
2809 // CHECK2-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2810 // CHECK2:       .omp.final.then:
2811 // CHECK2-NEXT:    store i8 96, i8* [[IT]], align 1
2812 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2813 // CHECK2:       .omp.final.done:
2814 // CHECK2-NEXT:    ret void
2815 //
2816 //
2817 // CHECK2-LABEL: define {{[^@]+}}@_Z3bari
2818 // CHECK2-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
2819 // CHECK2-NEXT:  entry:
2820 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
2821 // CHECK2-NEXT:    [[A:%.*]] = alloca i32, align 4
2822 // CHECK2-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
2823 // CHECK2-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
2824 // CHECK2-NEXT:    store i32 0, i32* [[A]], align 4
2825 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
2826 // CHECK2-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
2827 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
2828 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
2829 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
2830 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
2831 // CHECK2-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]])
2832 // CHECK2-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
2833 // CHECK2-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
2834 // CHECK2-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
2835 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
2836 // CHECK2-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
2837 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
2838 // CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
2839 // CHECK2-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
2840 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
2841 // CHECK2-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
2842 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
2843 // CHECK2-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
2844 // CHECK2-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
2845 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
2846 // CHECK2-NEXT:    ret i32 [[TMP8]]
2847 //
2848 //
2849 // CHECK2-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
2850 // CHECK2-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
2851 // CHECK2-NEXT:  entry:
2852 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
2853 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
2854 // CHECK2-NEXT:    [[B:%.*]] = alloca i32, align 4
2855 // CHECK2-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
2856 // CHECK2-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
2857 // CHECK2-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
2858 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
2859 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
2860 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
2861 // CHECK2-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
2862 // CHECK2-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
2863 // CHECK2-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
2864 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
2865 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
2866 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
2867 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
2868 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
2869 // CHECK2-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
2870 // CHECK2-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
2871 // CHECK2-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
2872 // CHECK2-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
2873 // CHECK2-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
2874 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
2875 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B]], align 4
2876 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
2877 // CHECK2-NEXT:    store i32 [[TMP5]], i32* [[CONV]], align 4
2878 // CHECK2-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
2879 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4
2880 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60
2881 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
2882 // CHECK2:       omp_if.then:
2883 // CHECK2-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
2884 // CHECK2-NEXT:    [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]]
2885 // CHECK2-NEXT:    [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
2886 // CHECK2-NEXT:    [[TMP10:%.*]] = bitcast [5 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
2887 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP10]], i8* align 8 bitcast ([5 x i64]* @.offload_sizes.11 to i8*), i64 40, i1 false)
2888 // CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2889 // CHECK2-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to %struct.S1**
2890 // CHECK2-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP12]], align 8
2891 // CHECK2-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2892 // CHECK2-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to double**
2893 // CHECK2-NEXT:    store double* [[A]], double** [[TMP14]], align 8
2894 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
2895 // CHECK2-NEXT:    store i8* null, i8** [[TMP15]], align 8
2896 // CHECK2-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
2897 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
2898 // CHECK2-NEXT:    store i64 [[TMP6]], i64* [[TMP17]], align 8
2899 // CHECK2-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
2900 // CHECK2-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64*
2901 // CHECK2-NEXT:    store i64 [[TMP6]], i64* [[TMP19]], align 8
2902 // CHECK2-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
2903 // CHECK2-NEXT:    store i8* null, i8** [[TMP20]], align 8
2904 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
2905 // CHECK2-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
2906 // CHECK2-NEXT:    store i64 2, i64* [[TMP22]], align 8
2907 // CHECK2-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
2908 // CHECK2-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64*
2909 // CHECK2-NEXT:    store i64 2, i64* [[TMP24]], align 8
2910 // CHECK2-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
2911 // CHECK2-NEXT:    store i8* null, i8** [[TMP25]], align 8
2912 // CHECK2-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
2913 // CHECK2-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
2914 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[TMP27]], align 8
2915 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
2916 // CHECK2-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
2917 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[TMP29]], align 8
2918 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
2919 // CHECK2-NEXT:    store i8* null, i8** [[TMP30]], align 8
2920 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
2921 // CHECK2-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i16**
2922 // CHECK2-NEXT:    store i16* [[VLA]], i16** [[TMP32]], align 8
2923 // CHECK2-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
2924 // CHECK2-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i16**
2925 // CHECK2-NEXT:    store i16* [[VLA]], i16** [[TMP34]], align 8
2926 // CHECK2-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
2927 // CHECK2-NEXT:    store i64 [[TMP9]], i64* [[TMP35]], align 8
2928 // CHECK2-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
2929 // CHECK2-NEXT:    store i8* null, i8** [[TMP36]], align 8
2930 // CHECK2-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2931 // CHECK2-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2932 // CHECK2-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
2933 // CHECK2-NEXT:    [[TMP40:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* [[TMP39]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
2934 // CHECK2-NEXT:    [[TMP41:%.*]] = icmp ne i32 [[TMP40]], 0
2935 // CHECK2-NEXT:    br i1 [[TMP41]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2936 // CHECK2:       omp_offload.failed:
2937 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]]
2938 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
2939 // CHECK2:       omp_offload.cont:
2940 // CHECK2-NEXT:    br label [[OMP_IF_END:%.*]]
2941 // CHECK2:       omp_if.else:
2942 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]]
2943 // CHECK2-NEXT:    br label [[OMP_IF_END]]
2944 // CHECK2:       omp_if.end:
2945 // CHECK2-NEXT:    [[TMP42:%.*]] = mul nsw i64 1, [[TMP2]]
2946 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP42]]
2947 // CHECK2-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
2948 // CHECK2-NEXT:    [[TMP43:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
2949 // CHECK2-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP43]] to i32
2950 // CHECK2-NEXT:    [[TMP44:%.*]] = load i32, i32* [[B]], align 4
2951 // CHECK2-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP44]]
2952 // CHECK2-NEXT:    [[TMP45:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
2953 // CHECK2-NEXT:    call void @llvm.stackrestore(i8* [[TMP45]])
2954 // CHECK2-NEXT:    ret i32 [[ADD4]]
2955 //
2956 //
2957 // CHECK2-LABEL: define {{[^@]+}}@_ZL7fstatici
2958 // CHECK2-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
2959 // CHECK2-NEXT:  entry:
2960 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
2961 // CHECK2-NEXT:    [[A:%.*]] = alloca i32, align 4
2962 // CHECK2-NEXT:    [[AA:%.*]] = alloca i16, align 2
2963 // CHECK2-NEXT:    [[AAA:%.*]] = alloca i8, align 1
2964 // CHECK2-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
2965 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
2966 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
2967 // CHECK2-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
2968 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
2969 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
2970 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
2971 // CHECK2-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
2972 // CHECK2-NEXT:    store i32 0, i32* [[A]], align 4
2973 // CHECK2-NEXT:    store i16 0, i16* [[AA]], align 2
2974 // CHECK2-NEXT:    store i8 0, i8* [[AAA]], align 1
2975 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
2976 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
2977 // CHECK2-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
2978 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
2979 // CHECK2-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
2980 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
2981 // CHECK2-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
2982 // CHECK2-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
2983 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
2984 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
2985 // CHECK2-NEXT:    store i8 [[TMP4]], i8* [[CONV2]], align 1
2986 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
2987 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
2988 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
2989 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
2990 // CHECK2:       omp_if.then:
2991 // CHECK2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2992 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
2993 // CHECK2-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
2994 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2995 // CHECK2-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
2996 // CHECK2-NEXT:    store i64 [[TMP1]], i64* [[TMP10]], align 8
2997 // CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
2998 // CHECK2-NEXT:    store i8* null, i8** [[TMP11]], align 8
2999 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
3000 // CHECK2-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
3001 // CHECK2-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
3002 // CHECK2-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
3003 // CHECK2-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
3004 // CHECK2-NEXT:    store i64 [[TMP3]], i64* [[TMP15]], align 8
3005 // CHECK2-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
3006 // CHECK2-NEXT:    store i8* null, i8** [[TMP16]], align 8
3007 // CHECK2-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
3008 // CHECK2-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
3009 // CHECK2-NEXT:    store i64 [[TMP5]], i64* [[TMP18]], align 8
3010 // CHECK2-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
3011 // CHECK2-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
3012 // CHECK2-NEXT:    store i64 [[TMP5]], i64* [[TMP20]], align 8
3013 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
3014 // CHECK2-NEXT:    store i8* null, i8** [[TMP21]], align 8
3015 // CHECK2-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
3016 // CHECK2-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
3017 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
3018 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
3019 // CHECK2-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
3020 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
3021 // CHECK2-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
3022 // CHECK2-NEXT:    store i8* null, i8** [[TMP26]], align 8
3023 // CHECK2-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3024 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3025 // CHECK2-NEXT:    [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
3026 // CHECK2-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
3027 // CHECK2-NEXT:    br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3028 // CHECK2:       omp_offload.failed:
3029 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
3030 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3031 // CHECK2:       omp_offload.cont:
3032 // CHECK2-NEXT:    br label [[OMP_IF_END:%.*]]
3033 // CHECK2:       omp_if.else:
3034 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
3035 // CHECK2-NEXT:    br label [[OMP_IF_END]]
3036 // CHECK2:       omp_if.end:
3037 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32, i32* [[A]], align 4
3038 // CHECK2-NEXT:    ret i32 [[TMP31]]
3039 //
3040 //
3041 // CHECK2-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
3042 // CHECK2-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
3043 // CHECK2-NEXT:  entry:
3044 // CHECK2-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3045 // CHECK2-NEXT:    [[A:%.*]] = alloca i32, align 4
3046 // CHECK2-NEXT:    [[AA:%.*]] = alloca i16, align 2
3047 // CHECK2-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
3048 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
3049 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
3050 // CHECK2-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
3051 // CHECK2-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
3052 // CHECK2-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
3053 // CHECK2-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3054 // CHECK2-NEXT:    store i32 0, i32* [[A]], align 4
3055 // CHECK2-NEXT:    store i16 0, i16* [[AA]], align 2
3056 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
3057 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
3058 // CHECK2-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
3059 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
3060 // CHECK2-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
3061 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
3062 // CHECK2-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
3063 // CHECK2-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
3064 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
3065 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
3066 // CHECK2-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
3067 // CHECK2:       omp_if.then:
3068 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3069 // CHECK2-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
3070 // CHECK2-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
3071 // CHECK2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3072 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
3073 // CHECK2-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
3074 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
3075 // CHECK2-NEXT:    store i8* null, i8** [[TMP9]], align 8
3076 // CHECK2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
3077 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
3078 // CHECK2-NEXT:    store i64 [[TMP3]], i64* [[TMP11]], align 8
3079 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
3080 // CHECK2-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
3081 // CHECK2-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
3082 // CHECK2-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
3083 // CHECK2-NEXT:    store i8* null, i8** [[TMP14]], align 8
3084 // CHECK2-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
3085 // CHECK2-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
3086 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
3087 // CHECK2-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
3088 // CHECK2-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
3089 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
3090 // CHECK2-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
3091 // CHECK2-NEXT:    store i8* null, i8** [[TMP19]], align 8
3092 // CHECK2-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3093 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3094 // CHECK2-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
3095 // CHECK2-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
3096 // CHECK2-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3097 // CHECK2:       omp_offload.failed:
3098 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
3099 // CHECK2-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3100 // CHECK2:       omp_offload.cont:
3101 // CHECK2-NEXT:    br label [[OMP_IF_END:%.*]]
3102 // CHECK2:       omp_if.else:
3103 // CHECK2-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
3104 // CHECK2-NEXT:    br label [[OMP_IF_END]]
3105 // CHECK2:       omp_if.end:
3106 // CHECK2-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
3107 // CHECK2-NEXT:    ret i32 [[TMP24]]
3108 //
3109 //
3110 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
3111 // CHECK2-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
3112 // CHECK2-NEXT:  entry:
3113 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
3114 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
3115 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
3116 // CHECK2-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
3117 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
3118 // CHECK2-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
3119 // CHECK2-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
3120 // CHECK2-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
3121 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
3122 // CHECK2-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
3123 // CHECK2-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
3124 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
3125 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
3126 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
3127 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
3128 // CHECK2-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
3129 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
3130 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
3131 // CHECK2-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
3132 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
3133 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
3134 // CHECK2-NEXT:    ret void
3135 //
3136 //
3137 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10
3138 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
3139 // CHECK2-NEXT:  entry:
3140 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3141 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3142 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
3143 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
3144 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
3145 // CHECK2-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
3146 // CHECK2-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
3147 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
3148 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i64, align 8
3149 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
3150 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
3151 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
3152 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3153 // CHECK2-NEXT:    [[IT:%.*]] = alloca i64, align 8
3154 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3155 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3156 // CHECK2-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
3157 // CHECK2-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
3158 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
3159 // CHECK2-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
3160 // CHECK2-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
3161 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
3162 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
3163 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
3164 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
3165 // CHECK2-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
3166 // CHECK2-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
3167 // CHECK2-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
3168 // CHECK2-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
3169 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3170 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3171 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
3172 // CHECK2-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
3173 // CHECK2-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
3174 // CHECK2-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
3175 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3176 // CHECK2:       cond.true:
3177 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3178 // CHECK2:       cond.false:
3179 // CHECK2-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
3180 // CHECK2-NEXT:    br label [[COND_END]]
3181 // CHECK2:       cond.end:
3182 // CHECK2-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
3183 // CHECK2-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
3184 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
3185 // CHECK2-NEXT:    store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
3186 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3187 // CHECK2:       omp.inner.for.cond:
3188 // CHECK2-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
3189 // CHECK2-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !38
3190 // CHECK2-NEXT:    [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
3191 // CHECK2-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3192 // CHECK2:       omp.inner.for.body:
3193 // CHECK2-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
3194 // CHECK2-NEXT:    [[MUL:%.*]] = mul i64 [[TMP11]], 400
3195 // CHECK2-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
3196 // CHECK2-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !38
3197 // CHECK2-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !38
3198 // CHECK2-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double
3199 // CHECK2-NEXT:    [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
3200 // CHECK2-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
3201 // CHECK2-NEXT:    store double [[ADD]], double* [[A]], align 8, !llvm.access.group !38
3202 // CHECK2-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
3203 // CHECK2-NEXT:    [[TMP13:%.*]] = load double, double* [[A5]], align 8, !llvm.access.group !38
3204 // CHECK2-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
3205 // CHECK2-NEXT:    store double [[INC]], double* [[A5]], align 8, !llvm.access.group !38
3206 // CHECK2-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
3207 // CHECK2-NEXT:    [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]]
3208 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]]
3209 // CHECK2-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
3210 // CHECK2-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !38
3211 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3212 // CHECK2:       omp.body.continue:
3213 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3214 // CHECK2:       omp.inner.for.inc:
3215 // CHECK2-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
3216 // CHECK2-NEXT:    [[ADD8:%.*]] = add i64 [[TMP15]], 1
3217 // CHECK2-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
3218 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
3219 // CHECK2:       omp.inner.for.end:
3220 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3221 // CHECK2:       omp.loop.exit:
3222 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
3223 // CHECK2-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3224 // CHECK2-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
3225 // CHECK2-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3226 // CHECK2:       .omp.final.then:
3227 // CHECK2-NEXT:    store i64 400, i64* [[IT]], align 8
3228 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3229 // CHECK2:       .omp.final.done:
3230 // CHECK2-NEXT:    ret void
3231 //
3232 //
3233 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
3234 // CHECK2-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
3235 // CHECK2-NEXT:  entry:
3236 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
3237 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
3238 // CHECK2-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
3239 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
3240 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
3241 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
3242 // CHECK2-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
3243 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
3244 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
3245 // CHECK2-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
3246 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
3247 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
3248 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
3249 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
3250 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
3251 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
3252 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
3253 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[CONV3]], align 4
3254 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
3255 // CHECK2-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
3256 // CHECK2-NEXT:    [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
3257 // CHECK2-NEXT:    store i16 [[TMP3]], i16* [[CONV4]], align 2
3258 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
3259 // CHECK2-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 1
3260 // CHECK2-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
3261 // CHECK2-NEXT:    store i8 [[TMP5]], i8* [[CONV5]], align 1
3262 // CHECK2-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
3263 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
3264 // CHECK2-NEXT:    ret void
3265 //
3266 //
3267 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..13
3268 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
3269 // CHECK2-NEXT:  entry:
3270 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3271 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3272 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
3273 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
3274 // CHECK2-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
3275 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
3276 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3277 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3278 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3279 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3280 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
3281 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
3282 // CHECK2-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
3283 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
3284 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
3285 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
3286 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
3287 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
3288 // CHECK2-NEXT:    ret void
3289 //
3290 //
3291 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
3292 // CHECK2-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
3293 // CHECK2-NEXT:  entry:
3294 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
3295 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
3296 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
3297 // CHECK2-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
3298 // CHECK2-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
3299 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
3300 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
3301 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
3302 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
3303 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
3304 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
3305 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
3306 // CHECK2-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
3307 // CHECK2-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
3308 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
3309 // CHECK2-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
3310 // CHECK2-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
3311 // CHECK2-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
3312 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
3313 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
3314 // CHECK2-NEXT:    ret void
3315 //
3316 //
3317 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..16
3318 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
3319 // CHECK2-NEXT:  entry:
3320 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3321 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3322 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
3323 // CHECK2-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
3324 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
3325 // CHECK2-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
3326 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i64, align 8
3327 // CHECK2-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
3328 // CHECK2-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
3329 // CHECK2-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
3330 // CHECK2-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3331 // CHECK2-NEXT:    [[I:%.*]] = alloca i64, align 8
3332 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3333 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
3334 // CHECK2-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
3335 // CHECK2-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
3336 // CHECK2-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
3337 // CHECK2-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
3338 // CHECK2-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
3339 // CHECK2-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
3340 // CHECK2-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
3341 // CHECK2-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
3342 // CHECK2-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
3343 // CHECK2-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3344 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
3345 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
3346 // CHECK2-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
3347 // CHECK2-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
3348 // CHECK2-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
3349 // CHECK2-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3350 // CHECK2:       cond.true:
3351 // CHECK2-NEXT:    br label [[COND_END:%.*]]
3352 // CHECK2:       cond.false:
3353 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
3354 // CHECK2-NEXT:    br label [[COND_END]]
3355 // CHECK2:       cond.end:
3356 // CHECK2-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3357 // CHECK2-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
3358 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
3359 // CHECK2-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
3360 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3361 // CHECK2:       omp.inner.for.cond:
3362 // CHECK2-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41
3363 // CHECK2-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !41
3364 // CHECK2-NEXT:    [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
3365 // CHECK2-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3366 // CHECK2:       omp.inner.for.body:
3367 // CHECK2-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41
3368 // CHECK2-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
3369 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
3370 // CHECK2-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !41
3371 // CHECK2-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !41
3372 // CHECK2-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
3373 // CHECK2-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !41
3374 // CHECK2-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !41
3375 // CHECK2-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
3376 // CHECK2-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
3377 // CHECK2-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
3378 // CHECK2-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !41
3379 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
3380 // CHECK2-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !41
3381 // CHECK2-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
3382 // CHECK2-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !41
3383 // CHECK2-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3384 // CHECK2:       omp.body.continue:
3385 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3386 // CHECK2:       omp.inner.for.inc:
3387 // CHECK2-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41
3388 // CHECK2-NEXT:    [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
3389 // CHECK2-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !41
3390 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
3391 // CHECK2:       omp.inner.for.end:
3392 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3393 // CHECK2:       omp.loop.exit:
3394 // CHECK2-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
3395 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3396 // CHECK2-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
3397 // CHECK2-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3398 // CHECK2:       .omp.final.then:
3399 // CHECK2-NEXT:    store i64 11, i64* [[I]], align 8
3400 // CHECK2-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3401 // CHECK2:       .omp.final.done:
3402 // CHECK2-NEXT:    ret void
3403 //
3404 //
3405 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
3406 // CHECK2-SAME: () #[[ATTR8:[0-9]+]] {
3407 // CHECK2-NEXT:  entry:
3408 // CHECK2-NEXT:    call void @__tgt_register_requires(i64 1)
3409 // CHECK2-NEXT:    ret void
3410 //
3411 //
3412 // CHECK3-LABEL: define {{[^@]+}}@_Z7get_valv
3413 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
3414 // CHECK3-NEXT:  entry:
3415 // CHECK3-NEXT:    ret i64 0
3416 //
3417 //
3418 // CHECK3-LABEL: define {{[^@]+}}@_Z3fooi
3419 // CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
3420 // CHECK3-NEXT:  entry:
3421 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3422 // CHECK3-NEXT:    [[A:%.*]] = alloca i32, align 4
3423 // CHECK3-NEXT:    [[AA:%.*]] = alloca i16, align 2
3424 // CHECK3-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
3425 // CHECK3-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
3426 // CHECK3-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
3427 // CHECK3-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
3428 // CHECK3-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
3429 // CHECK3-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
3430 // CHECK3-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
3431 // CHECK3-NEXT:    [[K:%.*]] = alloca i64, align 8
3432 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
3433 // CHECK3-NEXT:    [[LIN:%.*]] = alloca i32, align 4
3434 // CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
3435 // CHECK3-NEXT:    [[LIN_CASTED:%.*]] = alloca i32, align 4
3436 // CHECK3-NEXT:    [[A_CASTED2:%.*]] = alloca i32, align 4
3437 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
3438 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
3439 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
3440 // CHECK3-NEXT:    [[A_CASTED3:%.*]] = alloca i32, align 4
3441 // CHECK3-NEXT:    [[AA_CASTED4:%.*]] = alloca i32, align 4
3442 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x i8*], align 4
3443 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x i8*], align 4
3444 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x i8*], align 4
3445 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3446 // CHECK3-NEXT:    [[A_CASTED11:%.*]] = alloca i32, align 4
3447 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
3448 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4
3449 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4
3450 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4
3451 // CHECK3-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4
3452 // CHECK3-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
3453 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3454 // CHECK3-NEXT:    store i32 0, i32* [[A]], align 4
3455 // CHECK3-NEXT:    store i16 0, i16* [[AA]], align 2
3456 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
3457 // CHECK3-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
3458 // CHECK3-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
3459 // CHECK3-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
3460 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
3461 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
3462 // CHECK3-NEXT:    [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
3463 // CHECK3-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
3464 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
3465 // CHECK3-NEXT:    [[TMP5:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 20, i32 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
3466 // CHECK3-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates*
3467 // CHECK3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0
3468 // CHECK3-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP5]])
3469 // CHECK3-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
3470 // CHECK3-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
3471 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A]], align 4
3472 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[A_CASTED]], align 4
3473 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[A_CASTED]], align 4
3474 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i32 [[TMP10]], i64* [[K]]) #[[ATTR4:[0-9]+]]
3475 // CHECK3-NEXT:    store i32 12, i32* [[LIN]], align 4
3476 // CHECK3-NEXT:    [[TMP11:%.*]] = load i16, i16* [[AA]], align 2
3477 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
3478 // CHECK3-NEXT:    store i16 [[TMP11]], i16* [[CONV]], align 2
3479 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[AA_CASTED]], align 4
3480 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[LIN]], align 4
3481 // CHECK3-NEXT:    store i32 [[TMP13]], i32* [[LIN_CASTED]], align 4
3482 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
3483 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[A]], align 4
3484 // CHECK3-NEXT:    store i32 [[TMP15]], i32* [[A_CASTED2]], align 4
3485 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A_CASTED2]], align 4
3486 // CHECK3-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3487 // CHECK3-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
3488 // CHECK3-NEXT:    store i32 [[TMP12]], i32* [[TMP18]], align 4
3489 // CHECK3-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3490 // CHECK3-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
3491 // CHECK3-NEXT:    store i32 [[TMP12]], i32* [[TMP20]], align 4
3492 // CHECK3-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
3493 // CHECK3-NEXT:    store i8* null, i8** [[TMP21]], align 4
3494 // CHECK3-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
3495 // CHECK3-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
3496 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[TMP23]], align 4
3497 // CHECK3-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
3498 // CHECK3-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
3499 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[TMP25]], align 4
3500 // CHECK3-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
3501 // CHECK3-NEXT:    store i8* null, i8** [[TMP26]], align 4
3502 // CHECK3-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
3503 // CHECK3-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
3504 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[TMP28]], align 4
3505 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
3506 // CHECK3-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
3507 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[TMP30]], align 4
3508 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
3509 // CHECK3-NEXT:    store i8* null, i8** [[TMP31]], align 4
3510 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3511 // CHECK3-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3512 // CHECK3-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
3513 // CHECK3-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
3514 // CHECK3-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3515 // CHECK3:       omp_offload.failed:
3516 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i32 [[TMP12]], i32 [[TMP14]], i32 [[TMP16]]) #[[ATTR4]]
3517 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
3518 // CHECK3:       omp_offload.cont:
3519 // CHECK3-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A]], align 4
3520 // CHECK3-NEXT:    store i32 [[TMP36]], i32* [[A_CASTED3]], align 4
3521 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[A_CASTED3]], align 4
3522 // CHECK3-NEXT:    [[TMP38:%.*]] = load i16, i16* [[AA]], align 2
3523 // CHECK3-NEXT:    [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
3524 // CHECK3-NEXT:    store i16 [[TMP38]], i16* [[CONV5]], align 2
3525 // CHECK3-NEXT:    [[TMP39:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
3526 // CHECK3-NEXT:    [[TMP40:%.*]] = load i32, i32* [[N_ADDR]], align 4
3527 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP40]], 10
3528 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
3529 // CHECK3:       omp_if.then:
3530 // CHECK3-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
3531 // CHECK3-NEXT:    [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i32*
3532 // CHECK3-NEXT:    store i32 [[TMP37]], i32* [[TMP42]], align 4
3533 // CHECK3-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
3534 // CHECK3-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
3535 // CHECK3-NEXT:    store i32 [[TMP37]], i32* [[TMP44]], align 4
3536 // CHECK3-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
3537 // CHECK3-NEXT:    store i8* null, i8** [[TMP45]], align 4
3538 // CHECK3-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1
3539 // CHECK3-NEXT:    [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i32*
3540 // CHECK3-NEXT:    store i32 [[TMP39]], i32* [[TMP47]], align 4
3541 // CHECK3-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 1
3542 // CHECK3-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32*
3543 // CHECK3-NEXT:    store i32 [[TMP39]], i32* [[TMP49]], align 4
3544 // CHECK3-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1
3545 // CHECK3-NEXT:    store i8* null, i8** [[TMP50]], align 4
3546 // CHECK3-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
3547 // CHECK3-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
3548 // CHECK3-NEXT:    [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP51]], i8** [[TMP52]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
3549 // CHECK3-NEXT:    [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0
3550 // CHECK3-NEXT:    br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]]
3551 // CHECK3:       omp_offload.failed9:
3552 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]]
3553 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT10]]
3554 // CHECK3:       omp_offload.cont10:
3555 // CHECK3-NEXT:    br label [[OMP_IF_END:%.*]]
3556 // CHECK3:       omp_if.else:
3557 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]]
3558 // CHECK3-NEXT:    br label [[OMP_IF_END]]
3559 // CHECK3:       omp_if.end:
3560 // CHECK3-NEXT:    [[TMP55:%.*]] = load i32, i32* [[A]], align 4
3561 // CHECK3-NEXT:    store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_]], align 4
3562 // CHECK3-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A]], align 4
3563 // CHECK3-NEXT:    store i32 [[TMP56]], i32* [[A_CASTED11]], align 4
3564 // CHECK3-NEXT:    [[TMP57:%.*]] = load i32, i32* [[A_CASTED11]], align 4
3565 // CHECK3-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3566 // CHECK3-NEXT:    store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
3567 // CHECK3-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
3568 // CHECK3-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N_ADDR]], align 4
3569 // CHECK3-NEXT:    [[CMP12:%.*]] = icmp sgt i32 [[TMP60]], 20
3570 // CHECK3-NEXT:    br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE19:%.*]]
3571 // CHECK3:       omp_if.then13:
3572 // CHECK3-NEXT:    [[TMP61:%.*]] = mul nuw i32 [[TMP1]], 4
3573 // CHECK3-NEXT:    [[TMP62:%.*]] = sext i32 [[TMP61]] to i64
3574 // CHECK3-NEXT:    [[TMP63:%.*]] = mul nuw i32 5, [[TMP3]]
3575 // CHECK3-NEXT:    [[TMP64:%.*]] = mul nuw i32 [[TMP63]], 8
3576 // CHECK3-NEXT:    [[TMP65:%.*]] = sext i32 [[TMP64]] to i64
3577 // CHECK3-NEXT:    [[TMP66:%.*]] = bitcast [10 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
3578 // CHECK3-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP66]], i8* align 4 bitcast ([10 x i64]* @.offload_sizes.8 to i8*), i32 80, i1 false)
3579 // CHECK3-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
3580 // CHECK3-NEXT:    [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32*
3581 // CHECK3-NEXT:    store i32 [[TMP57]], i32* [[TMP68]], align 4
3582 // CHECK3-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
3583 // CHECK3-NEXT:    [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32*
3584 // CHECK3-NEXT:    store i32 [[TMP57]], i32* [[TMP70]], align 4
3585 // CHECK3-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0
3586 // CHECK3-NEXT:    store i8* null, i8** [[TMP71]], align 4
3587 // CHECK3-NEXT:    [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1
3588 // CHECK3-NEXT:    [[TMP73:%.*]] = bitcast i8** [[TMP72]] to [10 x float]**
3589 // CHECK3-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP73]], align 4
3590 // CHECK3-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1
3591 // CHECK3-NEXT:    [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]**
3592 // CHECK3-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 4
3593 // CHECK3-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1
3594 // CHECK3-NEXT:    store i8* null, i8** [[TMP76]], align 4
3595 // CHECK3-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2
3596 // CHECK3-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32*
3597 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP78]], align 4
3598 // CHECK3-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2
3599 // CHECK3-NEXT:    [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
3600 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP80]], align 4
3601 // CHECK3-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2
3602 // CHECK3-NEXT:    store i8* null, i8** [[TMP81]], align 4
3603 // CHECK3-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3
3604 // CHECK3-NEXT:    [[TMP83:%.*]] = bitcast i8** [[TMP82]] to float**
3605 // CHECK3-NEXT:    store float* [[VLA]], float** [[TMP83]], align 4
3606 // CHECK3-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3
3607 // CHECK3-NEXT:    [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float**
3608 // CHECK3-NEXT:    store float* [[VLA]], float** [[TMP85]], align 4
3609 // CHECK3-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
3610 // CHECK3-NEXT:    store i64 [[TMP62]], i64* [[TMP86]], align 4
3611 // CHECK3-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3
3612 // CHECK3-NEXT:    store i8* null, i8** [[TMP87]], align 4
3613 // CHECK3-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4
3614 // CHECK3-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to [5 x [10 x double]]**
3615 // CHECK3-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP89]], align 4
3616 // CHECK3-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4
3617 // CHECK3-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]**
3618 // CHECK3-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 4
3619 // CHECK3-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4
3620 // CHECK3-NEXT:    store i8* null, i8** [[TMP92]], align 4
3621 // CHECK3-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5
3622 // CHECK3-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32*
3623 // CHECK3-NEXT:    store i32 5, i32* [[TMP94]], align 4
3624 // CHECK3-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5
3625 // CHECK3-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32*
3626 // CHECK3-NEXT:    store i32 5, i32* [[TMP96]], align 4
3627 // CHECK3-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5
3628 // CHECK3-NEXT:    store i8* null, i8** [[TMP97]], align 4
3629 // CHECK3-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6
3630 // CHECK3-NEXT:    [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32*
3631 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[TMP99]], align 4
3632 // CHECK3-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6
3633 // CHECK3-NEXT:    [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32*
3634 // CHECK3-NEXT:    store i32 [[TMP3]], i32* [[TMP101]], align 4
3635 // CHECK3-NEXT:    [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6
3636 // CHECK3-NEXT:    store i8* null, i8** [[TMP102]], align 4
3637 // CHECK3-NEXT:    [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7
3638 // CHECK3-NEXT:    [[TMP104:%.*]] = bitcast i8** [[TMP103]] to double**
3639 // CHECK3-NEXT:    store double* [[VLA1]], double** [[TMP104]], align 4
3640 // CHECK3-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7
3641 // CHECK3-NEXT:    [[TMP106:%.*]] = bitcast i8** [[TMP105]] to double**
3642 // CHECK3-NEXT:    store double* [[VLA1]], double** [[TMP106]], align 4
3643 // CHECK3-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
3644 // CHECK3-NEXT:    store i64 [[TMP65]], i64* [[TMP107]], align 4
3645 // CHECK3-NEXT:    [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7
3646 // CHECK3-NEXT:    store i8* null, i8** [[TMP108]], align 4
3647 // CHECK3-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8
3648 // CHECK3-NEXT:    [[TMP110:%.*]] = bitcast i8** [[TMP109]] to %struct.TT**
3649 // CHECK3-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP110]], align 4
3650 // CHECK3-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8
3651 // CHECK3-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to %struct.TT**
3652 // CHECK3-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP112]], align 4
3653 // CHECK3-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8
3654 // CHECK3-NEXT:    store i8* null, i8** [[TMP113]], align 4
3655 // CHECK3-NEXT:    [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9
3656 // CHECK3-NEXT:    [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32*
3657 // CHECK3-NEXT:    store i32 [[TMP59]], i32* [[TMP115]], align 4
3658 // CHECK3-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9
3659 // CHECK3-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32*
3660 // CHECK3-NEXT:    store i32 [[TMP59]], i32* [[TMP117]], align 4
3661 // CHECK3-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9
3662 // CHECK3-NEXT:    store i8* null, i8** [[TMP118]], align 4
3663 // CHECK3-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
3664 // CHECK3-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
3665 // CHECK3-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
3666 // CHECK3-NEXT:    [[TMP122:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP119]], i8** [[TMP120]], i64* [[TMP121]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
3667 // CHECK3-NEXT:    [[TMP123:%.*]] = icmp ne i32 [[TMP122]], 0
3668 // CHECK3-NEXT:    br i1 [[TMP123]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
3669 // CHECK3:       omp_offload.failed17:
3670 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]]
3671 // CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
3672 // CHECK3:       omp_offload.cont18:
3673 // CHECK3-NEXT:    br label [[OMP_IF_END20:%.*]]
3674 // CHECK3:       omp_if.else19:
3675 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]]
3676 // CHECK3-NEXT:    br label [[OMP_IF_END20]]
3677 // CHECK3:       omp_if.end20:
3678 // CHECK3-NEXT:    [[TMP124:%.*]] = load i32, i32* [[A]], align 4
3679 // CHECK3-NEXT:    [[TMP125:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
3680 // CHECK3-NEXT:    call void @llvm.stackrestore(i8* [[TMP125]])
3681 // CHECK3-NEXT:    ret i32 [[TMP124]]
3682 //
3683 //
3684 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
3685 // CHECK3-SAME: () #[[ATTR2:[0-9]+]] {
3686 // CHECK3-NEXT:  entry:
3687 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
3688 // CHECK3-NEXT:    ret void
3689 //
3690 //
3691 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
3692 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
3693 // CHECK3-NEXT:  entry:
3694 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3695 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3696 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3697 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3698 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3699 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3700 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3701 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3702 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
3703 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3704 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3705 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3706 // CHECK3-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
3707 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3708 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3709 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3710 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
3711 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3712 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3713 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
3714 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3715 // CHECK3:       cond.true:
3716 // CHECK3-NEXT:    br label [[COND_END:%.*]]
3717 // CHECK3:       cond.false:
3718 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3719 // CHECK3-NEXT:    br label [[COND_END]]
3720 // CHECK3:       cond.end:
3721 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
3722 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3723 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3724 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
3725 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3726 // CHECK3:       omp.inner.for.cond:
3727 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
3728 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
3729 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
3730 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3731 // CHECK3:       omp.inner.for.body:
3732 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
3733 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
3734 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
3735 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
3736 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3737 // CHECK3:       omp.body.continue:
3738 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3739 // CHECK3:       omp.inner.for.inc:
3740 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
3741 // CHECK3-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
3742 // CHECK3-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
3743 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
3744 // CHECK3:       omp.inner.for.end:
3745 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3746 // CHECK3:       omp.loop.exit:
3747 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
3748 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3749 // CHECK3-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
3750 // CHECK3-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3751 // CHECK3:       .omp.final.then:
3752 // CHECK3-NEXT:    store i32 33, i32* [[I]], align 4
3753 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3754 // CHECK3:       .omp.final.done:
3755 // CHECK3-NEXT:    ret void
3756 //
3757 //
3758 // CHECK3-LABEL: define {{[^@]+}}@.omp_task_entry.
3759 // CHECK3-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
3760 // CHECK3-NEXT:  entry:
3761 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
3762 // CHECK3-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
3763 // CHECK3-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
3764 // CHECK3-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
3765 // CHECK3-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
3766 // CHECK3-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
3767 // CHECK3-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
3768 // CHECK3-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
3769 // CHECK3-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
3770 // CHECK3-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
3771 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
3772 // CHECK3-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
3773 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
3774 // CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
3775 // CHECK3-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
3776 // CHECK3-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
3777 // CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
3778 // CHECK3-NEXT:    [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
3779 // CHECK3-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
3780 // CHECK3-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
3781 // CHECK3-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
3782 // CHECK3-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]])
3783 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26
3784 // CHECK3-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !26
3785 // CHECK3-NEXT:    store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26
3786 // CHECK3-NEXT:    store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26
3787 // CHECK3-NEXT:    store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !26
3788 // CHECK3-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26
3789 // CHECK3-NEXT:    [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26
3790 // CHECK3-NEXT:    [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
3791 // CHECK3-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
3792 // CHECK3-NEXT:    br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
3793 // CHECK3:       omp_offload.failed.i:
3794 // CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
3795 // CHECK3-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
3796 // CHECK3:       .omp_outlined..1.exit:
3797 // CHECK3-NEXT:    ret i32 0
3798 //
3799 //
3800 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
3801 // CHECK3-SAME: (i32 noundef [[A:%.*]], i64* noundef nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
3802 // CHECK3-NEXT:  entry:
3803 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
3804 // CHECK3-NEXT:    [[K_ADDR:%.*]] = alloca i64*, align 4
3805 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
3806 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
3807 // CHECK3-NEXT:    store i64* [[K]], i64** [[K_ADDR]], align 4
3808 // CHECK3-NEXT:    [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
3809 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
3810 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
3811 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
3812 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i64*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP2]], i64* [[TMP0]])
3813 // CHECK3-NEXT:    ret void
3814 //
3815 //
3816 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2
3817 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i64* noundef nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
3818 // CHECK3-NEXT:  entry:
3819 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3820 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3821 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
3822 // CHECK3-NEXT:    [[K_ADDR:%.*]] = alloca i64*, align 4
3823 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3824 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3825 // CHECK3-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
3826 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3827 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3828 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3829 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3830 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
3831 // CHECK3-NEXT:    [[K1:%.*]] = alloca i64, align 8
3832 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3833 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3834 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
3835 // CHECK3-NEXT:    store i64* [[K]], i64** [[K_ADDR]], align 4
3836 // CHECK3-NEXT:    [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
3837 // CHECK3-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
3838 // CHECK3-NEXT:    store i64 [[TMP1]], i64* [[DOTLINEAR_START]], align 8
3839 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3840 // CHECK3-NEXT:    store i32 8, i32* [[DOTOMP_UB]], align 4
3841 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3842 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3843 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3844 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
3845 // CHECK3-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
3846 // CHECK3-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 35, i32 0, i32 8, i32 1, i32 1)
3847 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
3848 // CHECK3:       omp.dispatch.cond:
3849 // CHECK3-NEXT:    [[TMP4:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
3850 // CHECK3-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0
3851 // CHECK3-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
3852 // CHECK3:       omp.dispatch.body:
3853 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3854 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
3855 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3856 // CHECK3:       omp.inner.for.cond:
3857 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
3858 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
3859 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3860 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3861 // CHECK3:       omp.inner.for.body:
3862 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
3863 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
3864 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
3865 // CHECK3-NEXT:    store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !27
3866 // CHECK3-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !27
3867 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
3868 // CHECK3-NEXT:    [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3
3869 // CHECK3-NEXT:    [[CONV:%.*]] = sext i32 [[MUL2]] to i64
3870 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]]
3871 // CHECK3-NEXT:    store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !27
3872 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !27
3873 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
3874 // CHECK3-NEXT:    store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !27
3875 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3876 // CHECK3:       omp.body.continue:
3877 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3878 // CHECK3:       omp.inner.for.inc:
3879 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
3880 // CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
3881 // CHECK3-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
3882 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
3883 // CHECK3:       omp.inner.for.end:
3884 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
3885 // CHECK3:       omp.dispatch.inc:
3886 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
3887 // CHECK3:       omp.dispatch.end:
3888 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3889 // CHECK3-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
3890 // CHECK3-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3891 // CHECK3:       .omp.final.then:
3892 // CHECK3-NEXT:    store i32 1, i32* [[I]], align 4
3893 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3894 // CHECK3:       .omp.final.done:
3895 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3896 // CHECK3-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
3897 // CHECK3-NEXT:    br i1 [[TMP16]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
3898 // CHECK3:       .omp.linear.pu:
3899 // CHECK3-NEXT:    [[TMP17:%.*]] = load i64, i64* [[K1]], align 8
3900 // CHECK3-NEXT:    store i64 [[TMP17]], i64* [[TMP0]], align 8
3901 // CHECK3-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
3902 // CHECK3:       .omp.linear.pu.done:
3903 // CHECK3-NEXT:    ret void
3904 //
3905 //
3906 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
3907 // CHECK3-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR2]] {
3908 // CHECK3-NEXT:  entry:
3909 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
3910 // CHECK3-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
3911 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
3912 // CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
3913 // CHECK3-NEXT:    [[LIN_CASTED:%.*]] = alloca i32, align 4
3914 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
3915 // CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
3916 // CHECK3-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
3917 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
3918 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
3919 // CHECK3-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
3920 // CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
3921 // CHECK3-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
3922 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
3923 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
3924 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
3925 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
3926 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
3927 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[A_CASTED]], align 4
3928 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
3929 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
3930 // CHECK3-NEXT:    ret void
3931 //
3932 //
3933 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
3934 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
3935 // CHECK3-NEXT:  entry:
3936 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3937 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3938 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
3939 // CHECK3-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
3940 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
3941 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
3942 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i64, align 4
3943 // CHECK3-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
3944 // CHECK3-NEXT:    [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
3945 // CHECK3-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
3946 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
3947 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
3948 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
3949 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3950 // CHECK3-NEXT:    [[IT:%.*]] = alloca i64, align 8
3951 // CHECK3-NEXT:    [[LIN2:%.*]] = alloca i32, align 4
3952 // CHECK3-NEXT:    [[A3:%.*]] = alloca i32, align 4
3953 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3954 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3955 // CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
3956 // CHECK3-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
3957 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
3958 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
3959 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
3960 // CHECK3-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
3961 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
3962 // CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
3963 // CHECK3-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
3964 // CHECK3-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
3965 // CHECK3-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
3966 // CHECK3-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
3967 // CHECK3-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
3968 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3969 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3970 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
3971 // CHECK3-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
3972 // CHECK3-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
3973 // CHECK3-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
3974 // CHECK3-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
3975 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3976 // CHECK3:       cond.true:
3977 // CHECK3-NEXT:    br label [[COND_END:%.*]]
3978 // CHECK3:       cond.false:
3979 // CHECK3-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
3980 // CHECK3-NEXT:    br label [[COND_END]]
3981 // CHECK3:       cond.end:
3982 // CHECK3-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
3983 // CHECK3-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
3984 // CHECK3-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
3985 // CHECK3-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
3986 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3987 // CHECK3:       omp.inner.for.cond:
3988 // CHECK3-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
3989 // CHECK3-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !30
3990 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
3991 // CHECK3-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3992 // CHECK3:       omp.inner.for.body:
3993 // CHECK3-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
3994 // CHECK3-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
3995 // CHECK3-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
3996 // CHECK3-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !30
3997 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !30
3998 // CHECK3-NEXT:    [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
3999 // CHECK3-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
4000 // CHECK3-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30
4001 // CHECK3-NEXT:    [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
4002 // CHECK3-NEXT:    [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
4003 // CHECK3-NEXT:    [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
4004 // CHECK3-NEXT:    store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !30
4005 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !30
4006 // CHECK3-NEXT:    [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
4007 // CHECK3-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
4008 // CHECK3-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30
4009 // CHECK3-NEXT:    [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
4010 // CHECK3-NEXT:    [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
4011 // CHECK3-NEXT:    [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
4012 // CHECK3-NEXT:    store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !30
4013 // CHECK3-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !30
4014 // CHECK3-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
4015 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
4016 // CHECK3-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
4017 // CHECK3-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !30
4018 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4019 // CHECK3:       omp.body.continue:
4020 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4021 // CHECK3:       omp.inner.for.inc:
4022 // CHECK3-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
4023 // CHECK3-NEXT:    [[ADD15:%.*]] = add i64 [[TMP17]], 1
4024 // CHECK3-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
4025 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
4026 // CHECK3:       omp.inner.for.end:
4027 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4028 // CHECK3:       omp.loop.exit:
4029 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
4030 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4031 // CHECK3-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
4032 // CHECK3-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4033 // CHECK3:       .omp.final.then:
4034 // CHECK3-NEXT:    store i64 400, i64* [[IT]], align 8
4035 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4036 // CHECK3:       .omp.final.done:
4037 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4038 // CHECK3-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
4039 // CHECK3-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
4040 // CHECK3:       .omp.linear.pu:
4041 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4
4042 // CHECK3-NEXT:    store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4
4043 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A3]], align 4
4044 // CHECK3-NEXT:    store i32 [[TMP23]], i32* [[A_ADDR]], align 4
4045 // CHECK3-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
4046 // CHECK3:       .omp.linear.pu.done:
4047 // CHECK3-NEXT:    ret void
4048 //
4049 //
4050 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
4051 // CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR2]] {
4052 // CHECK3-NEXT:  entry:
4053 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
4054 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
4055 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
4056 // CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
4057 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
4058 // CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
4059 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
4060 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
4061 // CHECK3-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
4062 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
4063 // CHECK3-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
4064 // CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
4065 // CHECK3-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
4066 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
4067 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
4068 // CHECK3-NEXT:    ret void
4069 //
4070 //
4071 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4
4072 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3]] {
4073 // CHECK3-NEXT:  entry:
4074 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4075 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4076 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
4077 // CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
4078 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4079 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i16, align 2
4080 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4081 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4082 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4083 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4084 // CHECK3-NEXT:    [[IT:%.*]] = alloca i16, align 2
4085 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4086 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4087 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
4088 // CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
4089 // CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
4090 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4091 // CHECK3-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
4092 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4093 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4094 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4095 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
4096 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4097 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4098 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
4099 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4100 // CHECK3:       cond.true:
4101 // CHECK3-NEXT:    br label [[COND_END:%.*]]
4102 // CHECK3:       cond.false:
4103 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4104 // CHECK3-NEXT:    br label [[COND_END]]
4105 // CHECK3:       cond.end:
4106 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
4107 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4108 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4109 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
4110 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4111 // CHECK3:       omp.inner.for.cond:
4112 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
4113 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
4114 // CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
4115 // CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4116 // CHECK3:       omp.inner.for.body:
4117 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
4118 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
4119 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
4120 // CHECK3-NEXT:    [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
4121 // CHECK3-NEXT:    store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !33
4122 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !33
4123 // CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
4124 // CHECK3-NEXT:    store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !33
4125 // CHECK3-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !33
4126 // CHECK3-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
4127 // CHECK3-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
4128 // CHECK3-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
4129 // CHECK3-NEXT:    store i16 [[CONV6]], i16* [[CONV]], align 2, !llvm.access.group !33
4130 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4131 // CHECK3:       omp.body.continue:
4132 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4133 // CHECK3:       omp.inner.for.inc:
4134 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
4135 // CHECK3-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
4136 // CHECK3-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
4137 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
4138 // CHECK3:       omp.inner.for.end:
4139 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4140 // CHECK3:       omp.loop.exit:
4141 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
4142 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4143 // CHECK3-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
4144 // CHECK3-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4145 // CHECK3:       .omp.final.then:
4146 // CHECK3-NEXT:    store i16 22, i16* [[IT]], align 2
4147 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4148 // CHECK3:       .omp.final.done:
4149 // CHECK3-NEXT:    ret void
4150 //
4151 //
4152 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
4153 // CHECK3-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
4154 // CHECK3-NEXT:  entry:
4155 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
4156 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
4157 // CHECK3-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
4158 // CHECK3-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
4159 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
4160 // CHECK3-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
4161 // CHECK3-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
4162 // CHECK3-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
4163 // CHECK3-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
4164 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
4165 // CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
4166 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
4167 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
4168 // CHECK3-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
4169 // CHECK3-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
4170 // CHECK3-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
4171 // CHECK3-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
4172 // CHECK3-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
4173 // CHECK3-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
4174 // CHECK3-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
4175 // CHECK3-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
4176 // CHECK3-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
4177 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
4178 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
4179 // CHECK3-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
4180 // CHECK3-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
4181 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
4182 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
4183 // CHECK3-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
4184 // CHECK3-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
4185 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
4186 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
4187 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
4188 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
4189 // CHECK3-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
4190 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
4191 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
4192 // CHECK3-NEXT:    ret void
4193 //
4194 //
4195 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7
4196 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
4197 // CHECK3-NEXT:  entry:
4198 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4199 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4200 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
4201 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
4202 // CHECK3-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
4203 // CHECK3-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
4204 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
4205 // CHECK3-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
4206 // CHECK3-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
4207 // CHECK3-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
4208 // CHECK3-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
4209 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
4210 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4211 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i8, align 1
4212 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4213 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4214 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4215 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4216 // CHECK3-NEXT:    [[IT:%.*]] = alloca i8, align 1
4217 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4218 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4219 // CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
4220 // CHECK3-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
4221 // CHECK3-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
4222 // CHECK3-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
4223 // CHECK3-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
4224 // CHECK3-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
4225 // CHECK3-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
4226 // CHECK3-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
4227 // CHECK3-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
4228 // CHECK3-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
4229 // CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
4230 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
4231 // CHECK3-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
4232 // CHECK3-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
4233 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
4234 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
4235 // CHECK3-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
4236 // CHECK3-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
4237 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4238 // CHECK3-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
4239 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4240 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4241 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
4242 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4243 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
4244 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
4245 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
4246 // CHECK3:       omp.dispatch.cond:
4247 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4248 // CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
4249 // CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4250 // CHECK3:       cond.true:
4251 // CHECK3-NEXT:    br label [[COND_END:%.*]]
4252 // CHECK3:       cond.false:
4253 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4254 // CHECK3-NEXT:    br label [[COND_END]]
4255 // CHECK3:       cond.end:
4256 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
4257 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4258 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4259 // CHECK3-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
4260 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4261 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4262 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
4263 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4264 // CHECK3:       omp.dispatch.body:
4265 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4266 // CHECK3:       omp.inner.for.cond:
4267 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
4268 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
4269 // CHECK3-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
4270 // CHECK3-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4271 // CHECK3:       omp.inner.for.body:
4272 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
4273 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
4274 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
4275 // CHECK3-NEXT:    [[CONV:%.*]] = trunc i32 [[SUB]] to i8
4276 // CHECK3-NEXT:    store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !36
4277 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !36
4278 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
4279 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group !36
4280 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
4281 // CHECK3-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !36
4282 // CHECK3-NEXT:    [[CONV7:%.*]] = fpext float [[TMP20]] to double
4283 // CHECK3-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
4284 // CHECK3-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
4285 // CHECK3-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !36
4286 // CHECK3-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
4287 // CHECK3-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
4288 // CHECK3-NEXT:    [[CONV11:%.*]] = fpext float [[TMP21]] to double
4289 // CHECK3-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
4290 // CHECK3-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
4291 // CHECK3-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
4292 // CHECK3-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
4293 // CHECK3-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
4294 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
4295 // CHECK3-NEXT:    [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
4296 // CHECK3-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
4297 // CHECK3-NEXT:    [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
4298 // CHECK3-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
4299 // CHECK3-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
4300 // CHECK3-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
4301 // CHECK3-NEXT:    [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
4302 // CHECK3-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
4303 // CHECK3-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
4304 // CHECK3-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !36
4305 // CHECK3-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
4306 // CHECK3-NEXT:    store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !36
4307 // CHECK3-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
4308 // CHECK3-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !36
4309 // CHECK3-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
4310 // CHECK3-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
4311 // CHECK3-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
4312 // CHECK3-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !36
4313 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4314 // CHECK3:       omp.body.continue:
4315 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4316 // CHECK3:       omp.inner.for.inc:
4317 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
4318 // CHECK3-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
4319 // CHECK3-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
4320 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
4321 // CHECK3:       omp.inner.for.end:
4322 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
4323 // CHECK3:       omp.dispatch.inc:
4324 // CHECK3-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4325 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4326 // CHECK3-NEXT:    [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
4327 // CHECK3-NEXT:    store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
4328 // CHECK3-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4329 // CHECK3-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4330 // CHECK3-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
4331 // CHECK3-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
4332 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
4333 // CHECK3:       omp.dispatch.end:
4334 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
4335 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4336 // CHECK3-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
4337 // CHECK3-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4338 // CHECK3:       .omp.final.then:
4339 // CHECK3-NEXT:    store i8 96, i8* [[IT]], align 1
4340 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4341 // CHECK3:       .omp.final.done:
4342 // CHECK3-NEXT:    ret void
4343 //
4344 //
4345 // CHECK3-LABEL: define {{[^@]+}}@_Z3bari
4346 // CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
4347 // CHECK3-NEXT:  entry:
4348 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4349 // CHECK3-NEXT:    [[A:%.*]] = alloca i32, align 4
4350 // CHECK3-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
4351 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4352 // CHECK3-NEXT:    store i32 0, i32* [[A]], align 4
4353 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
4354 // CHECK3-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
4355 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
4356 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
4357 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
4358 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
4359 // CHECK3-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]])
4360 // CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
4361 // CHECK3-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
4362 // CHECK3-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
4363 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
4364 // CHECK3-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
4365 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
4366 // CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
4367 // CHECK3-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
4368 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
4369 // CHECK3-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
4370 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
4371 // CHECK3-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
4372 // CHECK3-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
4373 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
4374 // CHECK3-NEXT:    ret i32 [[TMP8]]
4375 //
4376 //
4377 // CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
4378 // CHECK3-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
4379 // CHECK3-NEXT:  entry:
4380 // CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
4381 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4382 // CHECK3-NEXT:    [[B:%.*]] = alloca i32, align 4
4383 // CHECK3-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
4384 // CHECK3-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
4385 // CHECK3-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
4386 // CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
4387 // CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
4388 // CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
4389 // CHECK3-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4
4390 // CHECK3-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
4391 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4392 // CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
4393 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
4394 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
4395 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
// CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK3-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK3-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK3-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK3-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK3-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60
// CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3:       omp_if.then:
// CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK3-NEXT:    [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2
// CHECK3-NEXT:    [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
// CHECK3-NEXT:    [[TMP10:%.*]] = bitcast [5 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
// CHECK3-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP10]], i8* align 4 bitcast ([5 x i64]* @.offload_sizes.11 to i8*), i32 40, i1 false)
// CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to %struct.S1**
// CHECK3-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP12]], align 4
// CHECK3-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to double**
// CHECK3-NEXT:    store double* [[A]], double** [[TMP14]], align 4
// CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT:    store i8* null, i8** [[TMP15]], align 4
// CHECK3-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
// CHECK3-NEXT:    store i32 [[TMP5]], i32* [[TMP17]], align 4
// CHECK3-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
// CHECK3-NEXT:    store i32 [[TMP5]], i32* [[TMP19]], align 4
// CHECK3-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT:    store i8* null, i8** [[TMP20]], align 4
// CHECK3-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32*
// CHECK3-NEXT:    store i32 2, i32* [[TMP22]], align 4
// CHECK3-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32*
// CHECK3-NEXT:    store i32 2, i32* [[TMP24]], align 4
// CHECK3-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT:    store i8* null, i8** [[TMP25]], align 4
// CHECK3-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK3-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
// CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP27]], align 4
// CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK3-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
// CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP29]], align 4
// CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK3-NEXT:    store i8* null, i8** [[TMP30]], align 4
// CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK3-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i16**
// CHECK3-NEXT:    store i16* [[VLA]], i16** [[TMP32]], align 4
// CHECK3-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK3-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i16**
// CHECK3-NEXT:    store i16* [[VLA]], i16** [[TMP34]], align 4
// CHECK3-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK3-NEXT:    store i64 [[TMP9]], i64* [[TMP35]], align 4
// CHECK3-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
// CHECK3-NEXT:    store i8* null, i8** [[TMP36]], align 4
// CHECK3-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP40:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* [[TMP39]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT:    [[TMP41:%.*]] = icmp ne i32 [[TMP40]], 0
// CHECK3-NEXT:    br i1 [[TMP41]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3:       omp_offload.failed:
// CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]]
// CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK3:       omp_offload.cont:
// CHECK3-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK3:       omp_if.else:
// CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]]
// CHECK3-NEXT:    br label [[OMP_IF_END]]
// CHECK3:       omp_if.end:
// CHECK3-NEXT:    [[TMP42:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP42]]
// CHECK3-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK3-NEXT:    [[TMP43:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK3-NEXT:    [[CONV:%.*]] = sext i16 [[TMP43]] to i32
// CHECK3-NEXT:    [[TMP44:%.*]] = load i32, i32* [[B]], align 4
// CHECK3-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP44]]
// CHECK3-NEXT:    [[TMP45:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK3-NEXT:    call void @llvm.stackrestore(i8* [[TMP45]])
// CHECK3-NEXT:    ret i32 [[ADD3]]
//
//
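// Host codegen for the internal function fstatic: the scalar captures a, aa
// and aaa travel by value through the *_CASTED temporaries, and the omp_if
// branches below pick between the __tgt_target_teams_mapper launch (n > 50)
// and the direct host fallback call.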
// CHECK3-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK3-NEXT:    [[AAA:%.*]] = alloca i8, align 1
// CHECK3-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
// CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK3-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK3-NEXT:    store i8 0, i8* [[AAA]], align 1
// CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT:    store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT:    [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK3-NEXT:    store i8 [[TMP4]], i8* [[CONV1]], align 1
// CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3:       omp_if.then:
// CHECK3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
// CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP10]], align 4
// CHECK3-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT:    store i8* null, i8** [[TMP11]], align 4
// CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK3-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK3-NEXT:    store i32 [[TMP3]], i32* [[TMP15]], align 4
// CHECK3-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT:    store i8* null, i8** [[TMP16]], align 4
// CHECK3-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK3-NEXT:    store i32 [[TMP5]], i32* [[TMP18]], align 4
// CHECK3-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK3-NEXT:    store i32 [[TMP5]], i32* [[TMP20]], align 4
// CHECK3-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT:    store i8* null, i8** [[TMP21]], align 4
// CHECK3-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK3-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4
// CHECK3-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK3-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4
// CHECK3-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK3-NEXT:    store i8* null, i8** [[TMP26]], align 4
// CHECK3-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK3-NEXT:    br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3:       omp_offload.failed:
// CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK3:       omp_offload.cont:
// CHECK3-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK3:       omp_if.else:
// CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK3-NEXT:    br label [[OMP_IF_END]]
// CHECK3:       omp_if.end:
// CHECK3-NEXT:    [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT:    ret i32 [[TMP31]]
//
//
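// Host codegen for the int instantiation of ftemplate: three captures (a, aa,
// b), with the offload guarded by n > 40 and a host fallback in omp_if.else.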
// CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK3-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK3-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT:    store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3:       omp_if.then:
// CHECK3-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
// CHECK3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK3-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK3-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT:    store i8* null, i8** [[TMP9]], align 4
// CHECK3-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
// CHECK3-NEXT:    store i32 [[TMP3]], i32* [[TMP11]], align 4
// CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK3-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK3-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT:    store i8* null, i8** [[TMP14]], align 4
// CHECK3-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
// CHECK3-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
// CHECK3-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT:    store i8* null, i8** [[TMP19]], align 4
// CHECK3-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK3-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3:       omp_offload.failed:
// CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK3-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK3:       omp_offload.cont:
// CHECK3-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK3:       omp_if.else:
// CHECK3-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK3-NEXT:    br label [[OMP_IF_END]]
// CHECK3:       omp_if.end:
// CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT:    ret i32 [[TMP24]]
//
//
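// Host-side outlined entry for the S1::r1 target region (source line 216):
// the VLA extents arrive as plain i32 arguments, b is rematerialized through
// B_CASTED, and the body is launched via a 5-argument __kmpc_fork_call.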
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK3-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK3-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK3-NEXT:    ret void
//
//
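// Parallel body for the l216 region: an unsigned, statically scheduled loop
// (__kmpc_for_static_init_8u) over it = 2000 - 400*iv that updates this->a
// and stores the truncated result into the VLA element c[1][1].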
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK3-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK3-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK3-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK3-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3:       cond.true:
// CHECK3-NEXT:    br label [[COND_END:%.*]]
// CHECK3:       cond.false:
// CHECK3-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT:    br label [[COND_END]]
// CHECK3:       cond.end:
// CHECK3-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK3-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT:    store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3:       omp.inner.for.cond:
// CHECK3-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK3-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !39
// CHECK3-NEXT:    [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK3-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3:       omp.inner.for.body:
// CHECK3-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK3-NEXT:    [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK3-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK3-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !39
// CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !39
// CHECK3-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK3-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT:    store double [[ADD]], double* [[A]], align 4, !llvm.access.group !39
// CHECK3-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP13:%.*]] = load double, double* [[A4]], align 4, !llvm.access.group !39
// CHECK3-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK3-NEXT:    store double [[INC]], double* [[A4]], align 4, !llvm.access.group !39
// CHECK3-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK3-NEXT:    [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
// CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK3-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !39
// CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3:       omp.body.continue:
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3:       omp.inner.for.inc:
// CHECK3-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK3-NEXT:    [[ADD7:%.*]] = add i64 [[TMP15]], 1
// CHECK3-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
// CHECK3:       omp.inner.for.end:
// CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3:       omp.loop.exit:
// CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK3-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3:       .omp.final.then:
// CHECK3-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK3:       .omp.final.done:
// CHECK3-NEXT:    ret void
//
//
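// Host-side outlined entry for the fstatic target region (source line 195):
// aa and aaa are re-narrowed to i16/i8 through bitcast temporaries before the
// 4-argument __kmpc_fork_call.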
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK3-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT:    store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 1
// CHECK3-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK3-NEXT:    store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK3-NEXT:    ret void
//
//
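// Parallel body for the l195 region: only the parameter setup is emitted;
// the associated loop is evidently folded away as zero-trip, so the function
// returns immediately.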
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..13
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT:    ret void
//
//
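// Host-side outlined entry for the ftemplate target region (source line 178),
// forwarding a, the re-narrowed aa, and the array b to a 3-argument
// __kmpc_fork_call.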
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK3-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK3-NEXT:    ret void
//
//
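// Parallel body for the l178 region: a signed static loop
// (__kmpc_for_static_init_8) over i = -10 + 3*iv that increments a, aa and
// b[2], with the final store i = 11 on the is_last path.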
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..16
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK3-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3:       cond.true:
// CHECK3-NEXT:    br label [[COND_END:%.*]]
// CHECK3:       cond.false:
// CHECK3-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT:    br label [[COND_END]]
// CHECK3:       cond.end:
// CHECK3-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3:       omp.inner.for.cond:
// CHECK3-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42
// CHECK3-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !42
// CHECK3-NEXT:    [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK3-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3:       omp.inner.for.body:
// CHECK3-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42
// CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK3-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK3-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !42
// CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !42
// CHECK3-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK3-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !42
// CHECK3-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !42
// CHECK3-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
// CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK3-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK3-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !42
// CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !42
// CHECK3-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK3-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !42
// CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3:       omp.body.continue:
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3:       omp.inner.for.inc:
// CHECK3-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42
// CHECK3-NEXT:    [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK3-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
// CHECK3:       omp.inner.for.end:
// CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3:       omp.loop.exit:
// CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK3-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3:       .omp.final.then:
// CHECK3-NEXT:    store i64 11, i64* [[I]], align 8
// CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK3:       .omp.final.done:
// CHECK3-NEXT:    ret void
//
//
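// Module constructor that registers the target requirements flags with the
// runtime via __tgt_register_requires.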
// CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK3-SAME: () #[[ATTR8:[0-9]+]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    call void @__tgt_register_requires(i64 1)
// CHECK3-NEXT:    ret void
//
//
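// The CHECK4 assertions for the same host functions begin here, starting
// with the trivial get_val helper.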
// CHECK4-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK4-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    ret i64 0
//
//
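// Host codegen for foo: VLAs via llvm.stacksave, an offload task built with
// __kmpc_omp_target_task_alloc and enqueued with __kmpc_omp_task, one
// unconditional __tgt_target_teams_mapper launch, and two launches guarded by
// n > 10 and n > 20, each with a host fallback call.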
// CHECK4-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK4-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK4-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
// CHECK4-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK4-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK4-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK4-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK4-NEXT:    [[K:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[LIN:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[A_CASTED2:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT:    [[A_CASTED3:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AA_CASTED4:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x i8*], align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x i8*], align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x i8*], align 4
// CHECK4-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[A_CASTED11:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4
// CHECK4-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK4-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK4-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK4-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
// CHECK4-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK4-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
// CHECK4-NEXT:    store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
// CHECK4-NEXT:    [[TMP5:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 20, i32 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK4-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates*
// CHECK4-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP5]])
// CHECK4-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK4-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
// CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT:    store i32 [[TMP9]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i32 [[TMP10]], i64* [[K]]) #[[ATTR4:[0-9]+]]
// CHECK4-NEXT:    store i32 12, i32* [[LIN]], align 4
// CHECK4-NEXT:    [[TMP11:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT:    store i16 [[TMP11]], i16* [[CONV]], align 2
// CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK4-NEXT:    store i32 [[TMP13]], i32* [[LIN_CASTED]], align 4
// CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT:    store i32 [[TMP15]], i32* [[A_CASTED2]], align 4
// CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A_CASTED2]], align 4
// CHECK4-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK4-NEXT:    store i32 [[TMP12]], i32* [[TMP18]], align 4
// CHECK4-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK4-NEXT:    store i32 [[TMP12]], i32* [[TMP20]], align 4
// CHECK4-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT:    store i8* null, i8** [[TMP21]], align 4
// CHECK4-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK4-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK4-NEXT:    store i32 [[TMP14]], i32* [[TMP23]], align 4
// CHECK4-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK4-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
// CHECK4-NEXT:    store i32 [[TMP14]], i32* [[TMP25]], align 4
// CHECK4-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK4-NEXT:    store i8* null, i8** [[TMP26]], align 4
// CHECK4-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK4-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
// CHECK4-NEXT:    store i32 [[TMP16]], i32* [[TMP28]], align 4
// CHECK4-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK4-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
// CHECK4-NEXT:    store i32 [[TMP16]], i32* [[TMP30]], align 4
// CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK4-NEXT:    store i8* null, i8** [[TMP31]], align 4
// CHECK4-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK4-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4:       omp_offload.failed:
// CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i32 [[TMP12]], i32 [[TMP14]], i32 [[TMP16]]) #[[ATTR4]]
// CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK4:       omp_offload.cont:
// CHECK4-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT:    store i32 [[TMP36]], i32* [[A_CASTED3]], align 4
// CHECK4-NEXT:    [[TMP37:%.*]] = load i32, i32* [[A_CASTED3]], align 4
// CHECK4-NEXT:    [[TMP38:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT:    [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
// CHECK4-NEXT:    store i16 [[TMP38]], i16* [[CONV5]], align 2
// CHECK4-NEXT:    [[TMP39:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
// CHECK4-NEXT:    [[TMP40:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP40]], 10
// CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK4:       omp_if.then:
// CHECK4-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i32*
// CHECK4-NEXT:    store i32 [[TMP37]], i32* [[TMP42]], align 4
// CHECK4-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
// CHECK4-NEXT:    store i32 [[TMP37]], i32* [[TMP44]], align 4
// CHECK4-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
// CHECK4-NEXT:    store i8* null, i8** [[TMP45]], align 4
// CHECK4-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1
// CHECK4-NEXT:    [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i32*
// CHECK4-NEXT:    store i32 [[TMP39]], i32* [[TMP47]], align 4
// CHECK4-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 1
// CHECK4-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32*
// CHECK4-NEXT:    store i32 [[TMP39]], i32* [[TMP49]], align 4
// CHECK4-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1
// CHECK4-NEXT:    store i8* null, i8** [[TMP50]], align 4
// CHECK4-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP51]], i8** [[TMP52]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT:    [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0
// CHECK4-NEXT:    br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]]
// CHECK4:       omp_offload.failed9:
// CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]]
// CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT10]]
// CHECK4:       omp_offload.cont10:
// CHECK4-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK4:       omp_if.else:
// CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]]
// CHECK4-NEXT:    br label [[OMP_IF_END]]
// CHECK4:       omp_if.end:
// CHECK4-NEXT:    [[TMP55:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT:    store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT:    store i32 [[TMP56]], i32* [[A_CASTED11]], align 4
// CHECK4-NEXT:    [[TMP57:%.*]] = load i32, i32* [[A_CASTED11]], align 4
// CHECK4-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT:    store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK4-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK4-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    [[CMP12:%.*]] = icmp sgt i32 [[TMP60]], 20
// CHECK4-NEXT:    br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE19:%.*]]
// CHECK4:       omp_if.then13:
// CHECK4-NEXT:    [[TMP61:%.*]] = mul nuw i32 [[TMP1]], 4
// CHECK4-NEXT:    [[TMP62:%.*]] = sext i32 [[TMP61]] to i64
// CHECK4-NEXT:    [[TMP63:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK4-NEXT:    [[TMP64:%.*]] = mul nuw i32 [[TMP63]], 8
// CHECK4-NEXT:    [[TMP65:%.*]] = sext i32 [[TMP64]] to i64
// CHECK4-NEXT:    [[TMP66:%.*]] = bitcast [10 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
// CHECK4-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP66]], i8* align 4 bitcast ([10 x i64]* @.offload_sizes.8 to i8*), i32 80, i1 false)
// CHECK4-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32*
// CHECK4-NEXT:    store i32 [[TMP57]], i32* [[TMP68]], align 4
// CHECK4-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32*
// CHECK4-NEXT:    store i32 [[TMP57]], i32* [[TMP70]], align 4
// CHECK4-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0
// CHECK4-NEXT:    store i8* null, i8** [[TMP71]], align 4
// CHECK4-NEXT:    [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1
// CHECK4-NEXT:    [[TMP73:%.*]] = bitcast i8** [[TMP72]] to [10 x float]**
// CHECK4-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP73]], align 4
// CHECK4-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1
// CHECK4-NEXT:    [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]**
// CHECK4-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 4
// CHECK4-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1
// CHECK4-NEXT:    store i8* null, i8** [[TMP76]], align 4
// CHECK4-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2
// CHECK4-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32*
// CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP78]], align 4
// CHECK4-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2
// CHECK4-NEXT:    [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
// CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP80]], align 4
// CHECK4-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2
// CHECK4-NEXT:    store i8* null, i8** [[TMP81]], align 4
// CHECK4-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3
// CHECK4-NEXT:    [[TMP83:%.*]] = bitcast i8** [[TMP82]] to float**
// CHECK4-NEXT:    store float* [[VLA]], float** [[TMP83]], align 4
// CHECK4-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3
// CHECK4-NEXT:    [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float**
// CHECK4-NEXT:    store float* [[VLA]], float** [[TMP85]], align 4
// CHECK4-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK4-NEXT:    store i64 [[TMP62]], i64* [[TMP86]], align 4
// CHECK4-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3
// CHECK4-NEXT:    store i8* null, i8** [[TMP87]], align 4
// CHECK4-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4
// CHECK4-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to [5 x [10 x double]]**
// CHECK4-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP89]], align 4
// CHECK4-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4
// CHECK4-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]**
// CHECK4-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 4
// CHECK4-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4
// CHECK4-NEXT:    store i8* null, i8** [[TMP92]], align 4
// CHECK4-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5
// CHECK4-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32*
// CHECK4-NEXT:    store i32 5, i32* [[TMP94]], align 4
// CHECK4-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5
// CHECK4-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32*
// CHECK4-NEXT:    store i32 5, i32* [[TMP96]], align 4
// CHECK4-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5
// CHECK4-NEXT:    store i8* null, i8** [[TMP97]], align 4
// CHECK4-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6
// CHECK4-NEXT:    [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32*
// CHECK4-NEXT:    store i32 [[TMP3]], i32* [[TMP99]], align 4
// CHECK4-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6
// CHECK4-NEXT:    [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32*
// CHECK4-NEXT:    store i32 [[TMP3]], i32* [[TMP101]], align 4
// CHECK4-NEXT:    [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6
// CHECK4-NEXT:    store i8* null, i8** [[TMP102]], align 4
// CHECK4-NEXT:    [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7
// CHECK4-NEXT:    [[TMP104:%.*]] = bitcast i8** [[TMP103]] to double**
// CHECK4-NEXT:    store double* [[VLA1]], double** [[TMP104]], align 4
// CHECK4-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7
// CHECK4-NEXT:    [[TMP106:%.*]] = bitcast i8** [[TMP105]] to double**
// CHECK4-NEXT:    store double* [[VLA1]], double** [[TMP106]], align 4
// CHECK4-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK4-NEXT:    store i64 [[TMP65]], i64* [[TMP107]], align 4
// CHECK4-NEXT:    [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7
// CHECK4-NEXT:    store i8* null, i8** [[TMP108]], align 4
// CHECK4-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8
// CHECK4-NEXT:    [[TMP110:%.*]] = bitcast i8** [[TMP109]] to %struct.TT**
// CHECK4-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP110]], align 4
// CHECK4-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8
// CHECK4-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to %struct.TT**
// CHECK4-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP112]], align 4
// CHECK4-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8
// CHECK4-NEXT:    store i8* null, i8** [[TMP113]], align 4
// CHECK4-NEXT:    [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9
// CHECK4-NEXT:    [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32*
// CHECK4-NEXT:    store i32 [[TMP59]], i32* [[TMP115]], align 4
// CHECK4-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9
// CHECK4-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32*
// CHECK4-NEXT:    store i32 [[TMP59]], i32* [[TMP117]], align 4
// CHECK4-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9
// CHECK4-NEXT:    store i8* null, i8** [[TMP118]], align 4
// CHECK4-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP122:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP119]], i8** [[TMP120]], i64* [[TMP121]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT:    [[TMP123:%.*]] = icmp ne i32 [[TMP122]], 0
// CHECK4-NEXT:    br i1 [[TMP123]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK4:       omp_offload.failed17:
// CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]]
// CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
// CHECK4:       omp_offload.cont18:
// CHECK4-NEXT:    br label [[OMP_IF_END20:%.*]]
// CHECK4:       omp_if.else19:
// CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]]
5192 // CHECK4-NEXT:    br label [[OMP_IF_END20]]
5193 // CHECK4:       omp_if.end20:
5194 // CHECK4-NEXT:    [[TMP124:%.*]] = load i32, i32* [[A]], align 4
5195 // CHECK4-NEXT:    [[TMP125:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
5196 // CHECK4-NEXT:    call void @llvm.stackrestore(i8* [[TMP125]])
5197 // CHECK4-NEXT:    ret i32 [[TMP124]]
5198 //
5199 //
5200 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
5201 // CHECK4-SAME: () #[[ATTR2:[0-9]+]] {
5202 // CHECK4-NEXT:  entry:
5203 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
5204 // CHECK4-NEXT:    ret void
5205 //
5206 //
5207 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
5208 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
5209 // CHECK4-NEXT:  entry:
5210 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5211 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5212 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5213 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5214 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5215 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5216 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5217 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5218 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
5219 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5220 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5221 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5222 // CHECK4-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
5223 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5224 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5225 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5226 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
5227 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5228 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5229 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
5230 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5231 // CHECK4:       cond.true:
5232 // CHECK4-NEXT:    br label [[COND_END:%.*]]
5233 // CHECK4:       cond.false:
5234 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5235 // CHECK4-NEXT:    br label [[COND_END]]
5236 // CHECK4:       cond.end:
5237 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
5238 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5239 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5240 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
5241 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5242 // CHECK4:       omp.inner.for.cond:
5243 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
5244 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
5245 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
5246 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5247 // CHECK4:       omp.inner.for.body:
5248 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
5249 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
5250 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
5251 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
5252 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5253 // CHECK4:       omp.body.continue:
5254 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5255 // CHECK4:       omp.inner.for.inc:
5256 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
5257 // CHECK4-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
5258 // CHECK4-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
5259 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
5260 // CHECK4:       omp.inner.for.end:
5261 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5262 // CHECK4:       omp.loop.exit:
5263 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
5264 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5265 // CHECK4-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
5266 // CHECK4-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5267 // CHECK4:       .omp.final.then:
5268 // CHECK4-NEXT:    store i32 33, i32* [[I]], align 4
5269 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5270 // CHECK4:       .omp.final.done:
5271 // CHECK4-NEXT:    ret void
5272 //
5273 //
5274 // CHECK4-LABEL: define {{[^@]+}}@.omp_task_entry.
5275 // CHECK4-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
5276 // CHECK4-NEXT:  entry:
5277 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
5278 // CHECK4-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
5279 // CHECK4-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
5280 // CHECK4-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
5281 // CHECK4-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
5282 // CHECK4-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
5283 // CHECK4-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
5284 // CHECK4-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
5285 // CHECK4-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
5286 // CHECK4-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
5287 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
5288 // CHECK4-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
5289 // CHECK4-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
5290 // CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
5291 // CHECK4-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
5292 // CHECK4-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
5293 // CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
5294 // CHECK4-NEXT:    [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
5295 // CHECK4-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
5296 // CHECK4-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
5297 // CHECK4-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
5298 // CHECK4-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]])
5299 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26
5300 // CHECK4-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !26
5301 // CHECK4-NEXT:    store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26
5302 // CHECK4-NEXT:    store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26
5303 // CHECK4-NEXT:    store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !26
5304 // CHECK4-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26
5305 // CHECK4-NEXT:    [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26
5306 // CHECK4-NEXT:    [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
5307 // CHECK4-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
5308 // CHECK4-NEXT:    br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
5309 // CHECK4:       omp_offload.failed.i:
5310 // CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
5311 // CHECK4-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
5312 // CHECK4:       .omp_outlined..1.exit:
5313 // CHECK4-NEXT:    ret i32 0
5314 //
5315 //
5316 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
5317 // CHECK4-SAME: (i32 noundef [[A:%.*]], i64* noundef nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
5318 // CHECK4-NEXT:  entry:
5319 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5320 // CHECK4-NEXT:    [[K_ADDR:%.*]] = alloca i64*, align 4
5321 // CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
5322 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5323 // CHECK4-NEXT:    store i64* [[K]], i64** [[K_ADDR]], align 4
5324 // CHECK4-NEXT:    [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
5325 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
5326 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
5327 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
5328 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i64*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP2]], i64* [[TMP0]])
5329 // CHECK4-NEXT:    ret void
5330 //
5331 //
5332 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..2
5333 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i64* noundef nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
5334 // CHECK4-NEXT:  entry:
5335 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5336 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5337 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5338 // CHECK4-NEXT:    [[K_ADDR:%.*]] = alloca i64*, align 4
5339 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5340 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5341 // CHECK4-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
5342 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5343 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5344 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5345 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5346 // CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
5347 // CHECK4-NEXT:    [[K1:%.*]] = alloca i64, align 8
5348 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5349 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5350 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5351 // CHECK4-NEXT:    store i64* [[K]], i64** [[K_ADDR]], align 4
5352 // CHECK4-NEXT:    [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
5353 // CHECK4-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
5354 // CHECK4-NEXT:    store i64 [[TMP1]], i64* [[DOTLINEAR_START]], align 8
5355 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5356 // CHECK4-NEXT:    store i32 8, i32* [[DOTOMP_UB]], align 4
5357 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5358 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5359 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5360 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
5361 // CHECK4-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
5362 // CHECK4-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 35, i32 0, i32 8, i32 1, i32 1)
5363 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
5364 // CHECK4:       omp.dispatch.cond:
5365 // CHECK4-NEXT:    [[TMP4:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
5366 // CHECK4-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0
5367 // CHECK4-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
5368 // CHECK4:       omp.dispatch.body:
5369 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5370 // CHECK4-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
5371 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5372 // CHECK4:       omp.inner.for.cond:
5373 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
5374 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
5375 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
5376 // CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5377 // CHECK4:       omp.inner.for.body:
5378 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
5379 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
5380 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
5381 // CHECK4-NEXT:    store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !27
5382 // CHECK4-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !27
5383 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
5384 // CHECK4-NEXT:    [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3
5385 // CHECK4-NEXT:    [[CONV:%.*]] = sext i32 [[MUL2]] to i64
5386 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]]
5387 // CHECK4-NEXT:    store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !27
5388 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !27
5389 // CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
5390 // CHECK4-NEXT:    store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !27
5391 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5392 // CHECK4:       omp.body.continue:
5393 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5394 // CHECK4:       omp.inner.for.inc:
5395 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
5396 // CHECK4-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
5397 // CHECK4-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
5398 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
5399 // CHECK4:       omp.inner.for.end:
5400 // CHECK4-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
5401 // CHECK4:       omp.dispatch.inc:
5402 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND]]
5403 // CHECK4:       omp.dispatch.end:
5404 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5405 // CHECK4-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
5406 // CHECK4-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5407 // CHECK4:       .omp.final.then:
5408 // CHECK4-NEXT:    store i32 1, i32* [[I]], align 4
5409 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5410 // CHECK4:       .omp.final.done:
5411 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5412 // CHECK4-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
5413 // CHECK4-NEXT:    br i1 [[TMP16]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
5414 // CHECK4:       .omp.linear.pu:
5415 // CHECK4-NEXT:    [[TMP17:%.*]] = load i64, i64* [[K1]], align 8
5416 // CHECK4-NEXT:    store i64 [[TMP17]], i64* [[TMP0]], align 8
5417 // CHECK4-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
5418 // CHECK4:       .omp.linear.pu.done:
5419 // CHECK4-NEXT:    ret void
5420 //
5421 //
5422 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
5423 // CHECK4-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR2]] {
5424 // CHECK4-NEXT:  entry:
5425 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
5426 // CHECK4-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
5427 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5428 // CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
5429 // CHECK4-NEXT:    [[LIN_CASTED:%.*]] = alloca i32, align 4
5430 // CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
5431 // CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
5432 // CHECK4-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
5433 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5434 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
5435 // CHECK4-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
5436 // CHECK4-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
5437 // CHECK4-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
5438 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
5439 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
5440 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
5441 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
5442 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
5443 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[A_CASTED]], align 4
5444 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
5445 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
5446 // CHECK4-NEXT:    ret void
5447 //
5448 //
5449 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3
5450 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
5451 // CHECK4-NEXT:  entry:
5452 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5453 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5454 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
5455 // CHECK4-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
5456 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5457 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
5458 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i64, align 4
5459 // CHECK4-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
5460 // CHECK4-NEXT:    [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
5461 // CHECK4-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
5462 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
5463 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
5464 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
5465 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5466 // CHECK4-NEXT:    [[IT:%.*]] = alloca i64, align 8
5467 // CHECK4-NEXT:    [[LIN2:%.*]] = alloca i32, align 4
5468 // CHECK4-NEXT:    [[A3:%.*]] = alloca i32, align 4
5469 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5470 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5471 // CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
5472 // CHECK4-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
5473 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5474 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
5475 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
5476 // CHECK4-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
5477 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
5478 // CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
5479 // CHECK4-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
5480 // CHECK4-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
5481 // CHECK4-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
5482 // CHECK4-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
5483 // CHECK4-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
5484 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5485 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5486 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
5487 // CHECK4-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
5488 // CHECK4-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
5489 // CHECK4-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
5490 // CHECK4-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
5491 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5492 // CHECK4:       cond.true:
5493 // CHECK4-NEXT:    br label [[COND_END:%.*]]
5494 // CHECK4:       cond.false:
5495 // CHECK4-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
5496 // CHECK4-NEXT:    br label [[COND_END]]
5497 // CHECK4:       cond.end:
5498 // CHECK4-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
5499 // CHECK4-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
5500 // CHECK4-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
5501 // CHECK4-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
5502 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5503 // CHECK4:       omp.inner.for.cond:
5504 // CHECK4-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
5505 // CHECK4-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !30
5506 // CHECK4-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
5507 // CHECK4-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5508 // CHECK4:       omp.inner.for.body:
5509 // CHECK4-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
5510 // CHECK4-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
5511 // CHECK4-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
5512 // CHECK4-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !30
5513 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !30
5514 // CHECK4-NEXT:    [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
5515 // CHECK4-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
5516 // CHECK4-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30
5517 // CHECK4-NEXT:    [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
5518 // CHECK4-NEXT:    [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
5519 // CHECK4-NEXT:    [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
5520 // CHECK4-NEXT:    store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !30
5521 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !30
5522 // CHECK4-NEXT:    [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
5523 // CHECK4-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
5524 // CHECK4-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30
5525 // CHECK4-NEXT:    [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
5526 // CHECK4-NEXT:    [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
5527 // CHECK4-NEXT:    [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
5528 // CHECK4-NEXT:    store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !30
5529 // CHECK4-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !30
5530 // CHECK4-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
5531 // CHECK4-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
5532 // CHECK4-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
5533 // CHECK4-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !30
5534 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5535 // CHECK4:       omp.body.continue:
5536 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5537 // CHECK4:       omp.inner.for.inc:
5538 // CHECK4-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
5539 // CHECK4-NEXT:    [[ADD15:%.*]] = add i64 [[TMP17]], 1
5540 // CHECK4-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
5541 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
5542 // CHECK4:       omp.inner.for.end:
5543 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5544 // CHECK4:       omp.loop.exit:
5545 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
5546 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5547 // CHECK4-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
5548 // CHECK4-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5549 // CHECK4:       .omp.final.then:
5550 // CHECK4-NEXT:    store i64 400, i64* [[IT]], align 8
5551 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5552 // CHECK4:       .omp.final.done:
5553 // CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5554 // CHECK4-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
5555 // CHECK4-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
5556 // CHECK4:       .omp.linear.pu:
5557 // CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4
5558 // CHECK4-NEXT:    store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4
5559 // CHECK4-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A3]], align 4
5560 // CHECK4-NEXT:    store i32 [[TMP23]], i32* [[A_ADDR]], align 4
5561 // CHECK4-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
5562 // CHECK4:       .omp.linear.pu.done:
5563 // CHECK4-NEXT:    ret void
5564 //
5565 //
5566 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
5567 // CHECK4-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR2]] {
5568 // CHECK4-NEXT:  entry:
5569 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5570 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
5571 // CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
5572 // CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
5573 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5574 // CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
5575 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
5576 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
5577 // CHECK4-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
5578 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
5579 // CHECK4-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
5580 // CHECK4-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
5581 // CHECK4-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
5582 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
5583 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
5584 // CHECK4-NEXT:    ret void
5585 //
5586 //
5587 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..4
5588 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3]] {
5589 // CHECK4-NEXT:  entry:
5590 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5591 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5592 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5593 // CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
5594 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5595 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i16, align 2
5596 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5597 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5598 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5599 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5600 // CHECK4-NEXT:    [[IT:%.*]] = alloca i16, align 2
5601 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5602 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5603 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5604 // CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
5605 // CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
5606 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5607 // CHECK4-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
5608 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5609 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5610 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5611 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
5612 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5613 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5614 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
5615 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5616 // CHECK4:       cond.true:
5617 // CHECK4-NEXT:    br label [[COND_END:%.*]]
5618 // CHECK4:       cond.false:
5619 // CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5620 // CHECK4-NEXT:    br label [[COND_END]]
5621 // CHECK4:       cond.end:
5622 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
5623 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5624 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5625 // CHECK4-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
5626 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5627 // CHECK4:       omp.inner.for.cond:
5628 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
5629 // CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
5630 // CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
5631 // CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5632 // CHECK4:       omp.inner.for.body:
5633 // CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
5634 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
5635 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
5636 // CHECK4-NEXT:    [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
5637 // CHECK4-NEXT:    store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !33
5638 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !33
5639 // CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
5640 // CHECK4-NEXT:    store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !33
5641 // CHECK4-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !33
5642 // CHECK4-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
5643 // CHECK4-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
5644 // CHECK4-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
5645 // CHECK4-NEXT:    store i16 [[CONV6]], i16* [[CONV]], align 2, !llvm.access.group !33
5646 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5647 // CHECK4:       omp.body.continue:
5648 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5649 // CHECK4:       omp.inner.for.inc:
5650 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
5651 // CHECK4-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
5652 // CHECK4-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
5653 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
5654 // CHECK4:       omp.inner.for.end:
5655 // CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5656 // CHECK4:       omp.loop.exit:
5657 // CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
5658 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5659 // CHECK4-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
5660 // CHECK4-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5661 // CHECK4:       .omp.final.then:
5662 // CHECK4-NEXT:    store i16 22, i16* [[IT]], align 2
5663 // CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5664 // CHECK4:       .omp.final.done:
5665 // CHECK4-NEXT:    ret void
5666 //
5667 //
5668 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
5669 // CHECK4-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
5670 // CHECK4-NEXT:  entry:
5671 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5672 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
5673 // CHECK4-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
5674 // CHECK4-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
5675 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
5676 // CHECK4-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
5677 // CHECK4-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
5678 // CHECK4-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
5679 // CHECK4-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
5680 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
5681 // CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
5682 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
5683 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5684 // CHECK4-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
5685 // CHECK4-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
5686 // CHECK4-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
5687 // CHECK4-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
5688 // CHECK4-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
5689 // CHECK4-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
5690 // CHECK4-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
5691 // CHECK4-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
5692 // CHECK4-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
5693 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
5694 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
5695 // CHECK4-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
5696 // CHECK4-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
5697 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
5698 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
5699 // CHECK4-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
5700 // CHECK4-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
5701 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
5702 // CHECK4-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
5703 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
5704 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
5705 // CHECK4-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
5706 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
5707 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
5708 // CHECK4-NEXT:    ret void
5709 //
5710 //
5711 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..7
5712 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
5713 // CHECK4-NEXT:  entry:
5714 // CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
5715 // CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
5716 // CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
5717 // CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
5718 // CHECK4-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
5719 // CHECK4-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
5720 // CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
5721 // CHECK4-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
5722 // CHECK4-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
5723 // CHECK4-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
5724 // CHECK4-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
5725 // CHECK4-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
5726 // CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5727 // CHECK4-NEXT:    [[TMP:%.*]] = alloca i8, align 1
5728 // CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5729 // CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5730 // CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5731 // CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5732 // CHECK4-NEXT:    [[IT:%.*]] = alloca i8, align 1
5733 // CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
5734 // CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
5735 // CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
5736 // CHECK4-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
5737 // CHECK4-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
5738 // CHECK4-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
5739 // CHECK4-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
5740 // CHECK4-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
5741 // CHECK4-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
5742 // CHECK4-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
5743 // CHECK4-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
5744 // CHECK4-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
5745 // CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
5746 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
5747 // CHECK4-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
5748 // CHECK4-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
5749 // CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
5750 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
5751 // CHECK4-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
5752 // CHECK4-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
5753 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5754 // CHECK4-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
5755 // CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5756 // CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5757 // CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
5758 // CHECK4-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
5759 // CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
5760 // CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
5761 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
5762 // CHECK4:       omp.dispatch.cond:
5763 // CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5764 // CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
5765 // CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5766 // CHECK4:       cond.true:
5767 // CHECK4-NEXT:    br label [[COND_END:%.*]]
5768 // CHECK4:       cond.false:
5769 // CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5770 // CHECK4-NEXT:    br label [[COND_END]]
5771 // CHECK4:       cond.end:
5772 // CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
5773 // CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5774 // CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5775 // CHECK4-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
5776 // CHECK4-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
5777 // CHECK4-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5778 // CHECK4-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
5779 // CHECK4-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
5780 // CHECK4:       omp.dispatch.body:
5781 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5782 // CHECK4:       omp.inner.for.cond:
5783 // CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
5784 // CHECK4-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
5785 // CHECK4-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
5786 // CHECK4-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5787 // CHECK4:       omp.inner.for.body:
5788 // CHECK4-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
5789 // CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
5790 // CHECK4-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
5791 // CHECK4-NEXT:    [[CONV:%.*]] = trunc i32 [[SUB]] to i8
5792 // CHECK4-NEXT:    store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !36
5793 // CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !36
5794 // CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
5795 // CHECK4-NEXT:    store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group !36
5796 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
5797 // CHECK4-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !36
5798 // CHECK4-NEXT:    [[CONV7:%.*]] = fpext float [[TMP20]] to double
5799 // CHECK4-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
5800 // CHECK4-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
5801 // CHECK4-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !36
5802 // CHECK4-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
5803 // CHECK4-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
5804 // CHECK4-NEXT:    [[CONV11:%.*]] = fpext float [[TMP21]] to double
5805 // CHECK4-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
5806 // CHECK4-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
5807 // CHECK4-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
5808 // CHECK4-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
5809 // CHECK4-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
5810 // CHECK4-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
5811 // CHECK4-NEXT:    [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
5812 // CHECK4-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
5813 // CHECK4-NEXT:    [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
5814 // CHECK4-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
5815 // CHECK4-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
5816 // CHECK4-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
5817 // CHECK4-NEXT:    [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
5818 // CHECK4-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
5819 // CHECK4-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
5820 // CHECK4-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !36
5821 // CHECK4-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
5822 // CHECK4-NEXT:    store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !36
5823 // CHECK4-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
5824 // CHECK4-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !36
5825 // CHECK4-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
5826 // CHECK4-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
5827 // CHECK4-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
5828 // CHECK4-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !36
5829 // CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5830 // CHECK4:       omp.body.continue:
5831 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5832 // CHECK4:       omp.inner.for.inc:
5833 // CHECK4-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
5834 // CHECK4-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
5835 // CHECK4-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
5836 // CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
5837 // CHECK4:       omp.inner.for.end:
5838 // CHECK4-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
5839 // CHECK4:       omp.dispatch.inc:
5840 // CHECK4-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5841 // CHECK4-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5842 // CHECK4-NEXT:    [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
5843 // CHECK4-NEXT:    store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
5844 // CHECK4-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5845 // CHECK4-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
5846 // CHECK4-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
5847 // CHECK4-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
5848 // CHECK4-NEXT:    br label [[OMP_DISPATCH_COND]]
5849 // CHECK4:       omp.dispatch.end:
// CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK4-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK4-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK4:       .omp.final.then:
// CHECK4-NEXT:    store i8 96, i8* [[IT]], align 1
// CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK4:       .omp.final.done:
// CHECK4-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z3bari
// CHECK4-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
// CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK4-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
// CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]])
// CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK4-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
// CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
// CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK4-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
// CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
// CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK4-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
// CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK4-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[B:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK4-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4
// CHECK4-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK4-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
// CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK4-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK4-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK4-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK4-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK4-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60
// CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK4:       omp_if.then:
// CHECK4-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK4-NEXT:    [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2
// CHECK4-NEXT:    [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
// CHECK4-NEXT:    [[TMP10:%.*]] = bitcast [5 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
// CHECK4-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP10]], i8* align 4 bitcast ([5 x i64]* @.offload_sizes.11 to i8*), i32 40, i1 false)
// CHECK4-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to %struct.S1**
// CHECK4-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP12]], align 4
// CHECK4-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to double**
// CHECK4-NEXT:    store double* [[A]], double** [[TMP14]], align 4
// CHECK4-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT:    store i8* null, i8** [[TMP15]], align 4
// CHECK4-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK4-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
// CHECK4-NEXT:    store i32 [[TMP5]], i32* [[TMP17]], align 4
// CHECK4-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK4-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
// CHECK4-NEXT:    store i32 [[TMP5]], i32* [[TMP19]], align 4
// CHECK4-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK4-NEXT:    store i8* null, i8** [[TMP20]], align 4
// CHECK4-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK4-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32*
// CHECK4-NEXT:    store i32 2, i32* [[TMP22]], align 4
// CHECK4-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK4-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32*
// CHECK4-NEXT:    store i32 2, i32* [[TMP24]], align 4
// CHECK4-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK4-NEXT:    store i8* null, i8** [[TMP25]], align 4
// CHECK4-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK4-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
// CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP27]], align 4
// CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK4-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
// CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP29]], align 4
// CHECK4-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK4-NEXT:    store i8* null, i8** [[TMP30]], align 4
// CHECK4-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK4-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i16**
// CHECK4-NEXT:    store i16* [[VLA]], i16** [[TMP32]], align 4
// CHECK4-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK4-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i16**
// CHECK4-NEXT:    store i16* [[VLA]], i16** [[TMP34]], align 4
// CHECK4-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK4-NEXT:    store i64 [[TMP9]], i64* [[TMP35]], align 4
// CHECK4-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
// CHECK4-NEXT:    store i8* null, i8** [[TMP36]], align 4
// CHECK4-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP40:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* [[TMP39]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT:    [[TMP41:%.*]] = icmp ne i32 [[TMP40]], 0
// CHECK4-NEXT:    br i1 [[TMP41]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4:       omp_offload.failed:
// CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]]
// CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK4:       omp_offload.cont:
// CHECK4-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK4:       omp_if.else:
// CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]]
// CHECK4-NEXT:    br label [[OMP_IF_END]]
// CHECK4:       omp_if.end:
// CHECK4-NEXT:    [[TMP42:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP42]]
// CHECK4-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK4-NEXT:    [[TMP43:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK4-NEXT:    [[CONV:%.*]] = sext i16 [[TMP43]] to i32
// CHECK4-NEXT:    [[TMP44:%.*]] = load i32, i32* [[B]], align 4
// CHECK4-NEXT:    [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP44]]
// CHECK4-NEXT:    [[TMP45:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK4-NEXT:    call void @llvm.stackrestore(i8* [[TMP45]])
// CHECK4-NEXT:    ret i32 [[ADD3]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK4-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK4-NEXT:    [[AAA:%.*]] = alloca i8, align 1
// CHECK4-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
// CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK4-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK4-NEXT:    store i8 0, i8* [[AAA]], align 1
// CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT:    store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT:    [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK4-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK4-NEXT:    store i8 [[TMP4]], i8* [[CONV1]], align 1
// CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK4:       omp_if.then:
// CHECK4-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
// CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP10]], align 4
// CHECK4-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT:    store i8* null, i8** [[TMP11]], align 4
// CHECK4-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK4-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK4-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK4-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK4-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK4-NEXT:    store i32 [[TMP3]], i32* [[TMP15]], align 4
// CHECK4-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK4-NEXT:    store i8* null, i8** [[TMP16]], align 4
// CHECK4-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK4-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK4-NEXT:    store i32 [[TMP5]], i32* [[TMP18]], align 4
// CHECK4-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK4-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK4-NEXT:    store i32 [[TMP5]], i32* [[TMP20]], align 4
// CHECK4-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK4-NEXT:    store i8* null, i8** [[TMP21]], align 4
// CHECK4-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK4-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4
// CHECK4-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK4-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4
// CHECK4-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK4-NEXT:    store i8* null, i8** [[TMP26]], align 4
// CHECK4-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK4-NEXT:    br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4:       omp_offload.failed:
// CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK4:       omp_offload.cont:
// CHECK4-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK4:       omp_if.else:
// CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK4-NEXT:    br label [[OMP_IF_END]]
// CHECK4:       omp_if.end:
// CHECK4-NEXT:    [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT:    ret i32 [[TMP31]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK4-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK4-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK4-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT:    store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK4:       omp_if.then:
// CHECK4-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
// CHECK4-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK4-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK4-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT:    store i8* null, i8** [[TMP9]], align 4
// CHECK4-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK4-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
// CHECK4-NEXT:    store i32 [[TMP3]], i32* [[TMP11]], align 4
// CHECK4-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK4-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK4-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK4-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK4-NEXT:    store i8* null, i8** [[TMP14]], align 4
// CHECK4-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK4-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
// CHECK4-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK4-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
// CHECK4-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK4-NEXT:    store i8* null, i8** [[TMP19]], align 4
// CHECK4-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK4-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4:       omp_offload.failed:
// CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK4-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK4:       omp_offload.cont:
// CHECK4-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK4:       omp_if.else:
// CHECK4-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK4-NEXT:    br label [[OMP_IF_END]]
// CHECK4:       omp_if.end:
// CHECK4-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT:    ret i32 [[TMP24]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK4-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK4-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK4-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK4-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK4-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK4-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK4-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4:       cond.true:
// CHECK4-NEXT:    br label [[COND_END:%.*]]
// CHECK4:       cond.false:
// CHECK4-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT:    br label [[COND_END]]
// CHECK4:       cond.end:
// CHECK4-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK4-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT:    store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4:       omp.inner.for.cond:
// CHECK4-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK4-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !39
// CHECK4-NEXT:    [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK4-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4:       omp.inner.for.body:
// CHECK4-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK4-NEXT:    [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK4-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK4-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !39
// CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !39
// CHECK4-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK4-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK4-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT:    store double [[ADD]], double* [[A]], align 4, !llvm.access.group !39
// CHECK4-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP13:%.*]] = load double, double* [[A4]], align 4, !llvm.access.group !39
// CHECK4-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK4-NEXT:    store double [[INC]], double* [[A4]], align 4, !llvm.access.group !39
// CHECK4-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK4-NEXT:    [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
// CHECK4-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK4-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !39
// CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4:       omp.body.continue:
// CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4:       omp.inner.for.inc:
// CHECK4-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK4-NEXT:    [[ADD7:%.*]] = add i64 [[TMP15]], 1
// CHECK4-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
// CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
// CHECK4:       omp.inner.for.end:
// CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4:       omp.loop.exit:
// CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK4-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK4-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK4:       .omp.final.then:
// CHECK4-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK4:       .omp.final.done:
// CHECK4-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK4-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK4-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT:    store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 1
// CHECK4-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK4-NEXT:    store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK4-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..13
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK4-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK4-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK4-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK4-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..16
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK4-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK4-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK4-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4:       cond.true:
// CHECK4-NEXT:    br label [[COND_END:%.*]]
// CHECK4:       cond.false:
// CHECK4-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT:    br label [[COND_END]]
// CHECK4:       cond.end:
// CHECK4-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK4-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4:       omp.inner.for.cond:
// CHECK4-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42
// CHECK4-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !42
// CHECK4-NEXT:    [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK4-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4:       omp.inner.for.body:
// CHECK4-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42
// CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK4-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK4-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !42
// CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !42
// CHECK4-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK4-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !42
// CHECK4-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !42
// CHECK4-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
// CHECK4-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK4-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK4-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !42
// CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !42
// CHECK4-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK4-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !42
// CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4:       omp.body.continue:
// CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4:       omp.inner.for.inc:
// CHECK4-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42
// CHECK4-NEXT:    [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK4-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !42
// CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
// CHECK4:       omp.inner.for.end:
// CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4:       omp.loop.exit:
// CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK4-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK4:       .omp.final.then:
// CHECK4-NEXT:    store i64 11, i64* [[I]], align 8
// CHECK4-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK4:       .omp.final.done:
// CHECK4-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK4-SAME: () #[[ATTR8:[0-9]+]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    call void @__tgt_register_requires(i64 1)
// CHECK4-NEXT:    ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    ret i64 0
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK5-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK5-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
// CHECK5-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK5-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK5-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK5-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK5-NEXT:    [[K:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[LIN:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[A_CASTED4:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK5-NEXT:    [[A_CASTED6:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[AA_CASTED8:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [2 x i8*], align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_PTRS11:%.*]] = alloca [2 x i8*], align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [2 x i8*], align 8
// CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[A_CASTED15:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8
// CHECK5-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8
// CHECK5-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK5-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK5-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK5-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK5-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
// CHECK5-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK5-NEXT:    [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK5-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
// CHECK5-NEXT:    store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
// CHECK5-NEXT:    [[TMP7:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.kmp_task_t_with_privates*
// CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP8]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP7]])
// CHECK5-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK5-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
// CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP11]], i32* [[CONV]], align 4
// CHECK5-NEXT:    [[TMP12:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT:    [[TMP13:%.*]] = load i64, i64* [[K]], align 8
// CHECK5-NEXT:    store i64 [[TMP13]], i64* [[K_CASTED]], align 8
// CHECK5-NEXT:    [[TMP14:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i64 [[TMP12]], i64 [[TMP14]]) #[[ATTR4:[0-9]+]]
// CHECK5-NEXT:    store i32 12, i32* [[LIN]], align 4
// CHECK5-NEXT:    [[TMP15:%.*]] = load i16, i16* [[AA]], align 2
// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK5-NEXT:    store i16 [[TMP15]], i16* [[CONV2]], align 2
// CHECK5-NEXT:    [[TMP16:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP17]], i32* [[CONV3]], align 4
// CHECK5-NEXT:    [[TMP18:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED4]] to i32*
// CHECK5-NEXT:    store i32 [[TMP19]], i32* [[CONV5]], align 4
// CHECK5-NEXT:    [[TMP20:%.*]] = load i64, i64* [[A_CASTED4]], align 8
// CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
// CHECK5-NEXT:    store i64 [[TMP16]], i64* [[TMP22]], align 8
// CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64*
// CHECK5-NEXT:    store i64 [[TMP16]], i64* [[TMP24]], align 8
// CHECK5-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK5-NEXT:    store i8* null, i8** [[TMP25]], align 8
// CHECK5-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
// CHECK5-NEXT:    store i64 [[TMP18]], i64* [[TMP27]], align 8
// CHECK5-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK5-NEXT:    store i64 [[TMP18]], i64* [[TMP29]], align 8
// CHECK5-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK5-NEXT:    store i8* null, i8** [[TMP30]], align 8
// CHECK5-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK5-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK5-NEXT:    store i64 [[TMP20]], i64* [[TMP32]], align 8
// CHECK5-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK5-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64*
// CHECK5-NEXT:    store i64 [[TMP20]], i64* [[TMP34]], align 8
// CHECK5-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK5-NEXT:    store i8* null, i8** [[TMP35]], align 8
// CHECK5-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP38:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK5-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
// CHECK5-NEXT:    br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5:       omp_offload.failed:
// CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]]) #[[ATTR4]]
// CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK5:       omp_offload.cont:
// CHECK5-NEXT:    [[TMP40:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT:    [[CONV7:%.*]] = bitcast i64* [[A_CASTED6]] to i32*
// CHECK5-NEXT:    store i32 [[TMP40]], i32* [[CONV7]], align 4
// CHECK5-NEXT:    [[TMP41:%.*]] = load i64, i64* [[A_CASTED6]], align 8
// CHECK5-NEXT:    [[TMP42:%.*]] = load i16, i16* [[AA]], align 2
// CHECK5-NEXT:    [[CONV9:%.*]] = bitcast i64* [[AA_CASTED8]] to i16*
// CHECK5-NEXT:    store i16 [[TMP42]], i16* [[CONV9]], align 2
// CHECK5-NEXT:    [[TMP43:%.*]] = load i64, i64* [[AA_CASTED8]], align 8
// CHECK5-NEXT:    [[TMP44:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP44]], 10
// CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK5:       omp_if.then:
// CHECK5-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64*
// CHECK5-NEXT:    store i64 [[TMP41]], i64* [[TMP46]], align 8
// CHECK5-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i64*
// CHECK5-NEXT:    store i64 [[TMP41]], i64* [[TMP48]], align 8
// CHECK5-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
// CHECK5-NEXT:    store i8* null, i8** [[TMP49]], align 8
// CHECK5-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i64*
// CHECK5-NEXT:    store i64 [[TMP43]], i64* [[TMP51]], align 8
// CHECK5-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64*
// CHECK5-NEXT:    store i64 [[TMP43]], i64* [[TMP53]], align 8
// CHECK5-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 1
// CHECK5-NEXT:    store i8* null, i8** [[TMP54]], align 8
// CHECK5-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK5-NEXT:    [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0
// CHECK5-NEXT:    br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
// CHECK5:       omp_offload.failed13:
// CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]]
// CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT14]]
// CHECK5:       omp_offload.cont14:
// CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK5:       omp_if.else:
// CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]]
// CHECK5-NEXT:    br label [[OMP_IF_END]]
// CHECK5:       omp_if.end:
// CHECK5-NEXT:    [[TMP59:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT:    store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT:    [[TMP60:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT:    [[CONV16:%.*]] = bitcast i64* [[A_CASTED15]] to i32*
// CHECK5-NEXT:    store i32 [[TMP60]], i32* [[CONV16]], align 4
// CHECK5-NEXT:    [[TMP61:%.*]] = load i64, i64* [[A_CASTED15]], align 8
// CHECK5-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT:    [[CONV17:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP62]], i32* [[CONV17]], align 4
// CHECK5-NEXT:    [[TMP63:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK5-NEXT:    [[TMP64:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT:    [[CMP18:%.*]] = icmp sgt i32 [[TMP64]], 20
// CHECK5-NEXT:    br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE25:%.*]]
// CHECK5:       omp_if.then19:
// CHECK5-NEXT:    [[TMP65:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK5-NEXT:    [[TMP66:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK5-NEXT:    [[TMP67:%.*]] = mul nuw i64 [[TMP66]], 8
// CHECK5-NEXT:    [[TMP68:%.*]] = bitcast [10 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
// CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP68]], i8* align 8 bitcast ([10 x i64]* @.offload_sizes.8 to i8*), i64 80, i1 false)
// CHECK5-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64*
// CHECK5-NEXT:    store i64 [[TMP61]], i64* [[TMP70]], align 8
// CHECK5-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
// CHECK5-NEXT:    store i64 [[TMP61]], i64* [[TMP72]], align 8
// CHECK5-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
// CHECK5-NEXT:    store i8* null, i8** [[TMP73]], align 8
// CHECK5-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]**
// CHECK5-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 8
// CHECK5-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP77:%.*]] = bitcast i8** [[TMP76]] to [10 x float]**
// CHECK5-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP77]], align 8
// CHECK5-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
// CHECK5-NEXT:    store i8* null, i8** [[TMP78]], align 8
// CHECK5-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
// CHECK5-NEXT:    [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i64*
// CHECK5-NEXT:    store i64 [[TMP2]], i64* [[TMP80]], align 8
// CHECK5-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
// CHECK5-NEXT:    [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i64*
// CHECK5-NEXT:    store i64 [[TMP2]], i64* [[TMP82]], align 8
// CHECK5-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
// CHECK5-NEXT:    store i8* null, i8** [[TMP83]], align 8
// CHECK5-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
// CHECK5-NEXT:    [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float**
// CHECK5-NEXT:    store float* [[VLA]], float** [[TMP85]], align 8
// CHECK5-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
// CHECK5-NEXT:    [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float**
// CHECK5-NEXT:    store float* [[VLA]], float** [[TMP87]], align 8
// CHECK5-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK5-NEXT:    store i64 [[TMP65]], i64* [[TMP88]], align 8
// CHECK5-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
// CHECK5-NEXT:    store i8* null, i8** [[TMP89]], align 8
// CHECK5-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
// CHECK5-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]**
// CHECK5-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 8
// CHECK5-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
// CHECK5-NEXT:    [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]**
// CHECK5-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 8
// CHECK5-NEXT:    [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
// CHECK5-NEXT:    store i8* null, i8** [[TMP94]], align 8
// CHECK5-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5
// CHECK5-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i64*
// CHECK5-NEXT:    store i64 5, i64* [[TMP96]], align 8
// CHECK5-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5
// CHECK5-NEXT:    [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64*
// CHECK5-NEXT:    store i64 5, i64* [[TMP98]], align 8
// CHECK5-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5
6670 // CHECK5-NEXT:    store i8* null, i8** [[TMP99]], align 8
6671 // CHECK5-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6
6672 // CHECK5-NEXT:    [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64*
6673 // CHECK5-NEXT:    store i64 [[TMP5]], i64* [[TMP101]], align 8
6674 // CHECK5-NEXT:    [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6
6675 // CHECK5-NEXT:    [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i64*
6676 // CHECK5-NEXT:    store i64 [[TMP5]], i64* [[TMP103]], align 8
6677 // CHECK5-NEXT:    [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6
6678 // CHECK5-NEXT:    store i8* null, i8** [[TMP104]], align 8
6679 // CHECK5-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7
6680 // CHECK5-NEXT:    [[TMP106:%.*]] = bitcast i8** [[TMP105]] to double**
6681 // CHECK5-NEXT:    store double* [[VLA1]], double** [[TMP106]], align 8
6682 // CHECK5-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7
6683 // CHECK5-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to double**
6684 // CHECK5-NEXT:    store double* [[VLA1]], double** [[TMP108]], align 8
6685 // CHECK5-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
6686 // CHECK5-NEXT:    store i64 [[TMP67]], i64* [[TMP109]], align 8
6687 // CHECK5-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7
6688 // CHECK5-NEXT:    store i8* null, i8** [[TMP110]], align 8
6689 // CHECK5-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8
6690 // CHECK5-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to %struct.TT**
6691 // CHECK5-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP112]], align 8
6692 // CHECK5-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8
6693 // CHECK5-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to %struct.TT**
6694 // CHECK5-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP114]], align 8
6695 // CHECK5-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8
6696 // CHECK5-NEXT:    store i8* null, i8** [[TMP115]], align 8
6697 // CHECK5-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9
6698 // CHECK5-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i64*
6699 // CHECK5-NEXT:    store i64 [[TMP63]], i64* [[TMP117]], align 8
6700 // CHECK5-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9
6701 // CHECK5-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i64*
6702 // CHECK5-NEXT:    store i64 [[TMP63]], i64* [[TMP119]], align 8
6703 // CHECK5-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9
6704 // CHECK5-NEXT:    store i8* null, i8** [[TMP120]], align 8
6705 // CHECK5-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
6706 // CHECK5-NEXT:    [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
6707 // CHECK5-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
6708 // CHECK5-NEXT:    [[TMP124:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP121]], i8** [[TMP122]], i64* [[TMP123]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
6709 // CHECK5-NEXT:    [[TMP125:%.*]] = icmp ne i32 [[TMP124]], 0
6710 // CHECK5-NEXT:    br i1 [[TMP125]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]]
6711 // CHECK5:       omp_offload.failed23:
6712 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]]
6713 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT24]]
6714 // CHECK5:       omp_offload.cont24:
6715 // CHECK5-NEXT:    br label [[OMP_IF_END26:%.*]]
6716 // CHECK5:       omp_if.else25:
6717 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]]
6718 // CHECK5-NEXT:    br label [[OMP_IF_END26]]
6719 // CHECK5:       omp_if.end26:
6720 // CHECK5-NEXT:    [[TMP126:%.*]] = load i32, i32* [[A]], align 4
6721 // CHECK5-NEXT:    [[TMP127:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
6722 // CHECK5-NEXT:    call void @llvm.stackrestore(i8* [[TMP127]])
6723 // CHECK5-NEXT:    ret i32 [[TMP126]]
6724 //
6725 //
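// The stub below is the host-side kernel entry for the offload region at
// source line 96: it forks a parallel team with no captured arguments. A
// rough source sketch, inferred from the loop bounds asserted in
// .omp_outlined. below (an assumption, not part of the generated checks):
//
//   #pragma omp target parallel for simd
//   for (int i = 3; i < 32; i += 5)
//     ;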
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK5-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK5-NEXT:    ret void
//
//
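// The outlined body below calls __kmpc_for_static_init_4 with schedule kind
// 34 (kmp_sch_static in the runtime's sched_type enum) over the normalized
// space 0..5: six iterations, with the user variable recomputed as
// i = 3 + 5*iv and the final value i = 33 stored only when is-last is set.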
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5:       cond.true:
// CHECK5-NEXT:    br label [[COND_END:%.*]]
// CHECK5:       cond.false:
// CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    br label [[COND_END]]
// CHECK5:       cond.end:
// CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5:       omp.inner.for.cond:
// CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
// CHECK5-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK5-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5:       omp.inner.for.body:
// CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK5-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10
// CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5:       omp.body.continue:
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5:       omp.inner.for.inc:
// CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK5-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK5-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK5:       omp.inner.for.end:
// CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5:       omp.loop.exit:
// CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK5-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5:       .omp.final.then:
// CHECK5-NEXT:    store i32 33, i32* [[I]], align 4
// CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK5:       .omp.final.done:
// CHECK5-NEXT:    ret void
//
//
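// The l96 launch is wrapped in a task, which suggests the construct carries a
// 'nowait' clause: the proxy entry below unpacks the kmp_task_t payload and
// calls __tgt_target_teams_nowait_mapper with zero mapped arguments, falling
// back to the host stub if the offload fails.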
// CHECK5-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK5-SAME: (i32 noundef signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK5-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK5-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK5-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
// CHECK5-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK5-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK5-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK5-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK5-NEXT:    [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK5-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
// CHECK5-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
// CHECK5-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
// CHECK5-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
// CHECK5-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25
// CHECK5-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !25
// CHECK5-NEXT:    store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25
// CHECK5-NEXT:    store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25
// CHECK5-NEXT:    store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !25
// CHECK5-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
// CHECK5-NEXT:    [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
// CHECK5-NEXT:    [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
// CHECK5-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK5-NEXT:    br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK5:       omp_offload.failed.i:
// CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
// CHECK5-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK5:       .omp_outlined..1.exit:
// CHECK5-NEXT:    ret i32 0
//
//
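// Host entry for the region at line 101: the two i64 parameters repack the
// captured 'a' (an i32 stored through the low half of A_CASTED) and the i64
// 'k' before forking .omp_outlined..2.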
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
// CHECK5-SAME: (i64 noundef [[A:%.*]], i64 noundef [[K:%.*]]) #[[ATTR3]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP0]], i32* [[CONV1]], align 4
// CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[K_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[TMP2]], i64* [[K_CASTED]], align 8
// CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK5-NEXT:    ret void
//
//
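// The dispatch_init constant 1073741859 below decodes as 35 | (1 << 30), i.e.
// kmp_sch_dynamic_chunked with the nonmonotonic modifier and chunk 1, so the
// presumed source asks for a dynamic schedule here; 'k' is treated as a
// linear variable with step 3, its start saved into DOTLINEAR_START and its
// final value written back in the .omp.linear.pu block.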
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[K:%.*]]) #[[ATTR3]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[K1:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT:    [[TMP0:%.*]] = load i64, i64* [[K_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[TMP0]], i64* [[DOTLINEAR_START]], align 8
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 8, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK5-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]])
// CHECK5-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1073741859, i32 0, i32 8, i32 1, i32 1)
// CHECK5-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
// CHECK5:       omp.dispatch.cond:
// CHECK5-NEXT:    [[TMP3:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK5-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK5-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK5:       omp.dispatch.body:
// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5:       omp.inner.for.cond:
// CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26
// CHECK5-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5:       omp.inner.for.body:
// CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK5-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
// CHECK5-NEXT:    store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !26
// CHECK5-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !26
// CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK5-NEXT:    [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3
// CHECK5-NEXT:    [[CONV3:%.*]] = sext i32 [[MUL2]] to i64
// CHECK5-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]]
// CHECK5-NEXT:    store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !26
// CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !26
// CHECK5-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK5-NEXT:    store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group !26
// CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5:       omp.body.continue:
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5:       omp.inner.for.inc:
// CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK5-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK5-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
// CHECK5:       omp.inner.for.end:
// CHECK5-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
// CHECK5:       omp.dispatch.inc:
// CHECK5-NEXT:    br label [[OMP_DISPATCH_COND]]
// CHECK5:       omp.dispatch.end:
// CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK5-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5:       .omp.final.then:
// CHECK5-NEXT:    store i32 1, i32* [[I]], align 4
// CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK5:       .omp.final.done:
// CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK5-NEXT:    br i1 [[TMP15]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK5:       .omp.linear.pu:
// CHECK5-NEXT:    [[TMP16:%.*]] = load i64, i64* [[K1]], align 8
// CHECK5-NEXT:    store i64 [[TMP16]], i64* [[K_ADDR]], align 8
// CHECK5-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK5:       .omp.linear.pu.done:
// CHECK5-NEXT:    ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK5-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK5-NEXT:    store i16 [[TMP0]], i16* [[CONV3]], align 2
// CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
// CHECK5-NEXT:    [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP2]], i32* [[CONV4]], align 4
// CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 4
// CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP4]], i32* [[CONV5]], align 4
// CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
// CHECK5-NEXT:    ret void
//
//
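// The l108 body below iterates a 64-bit unsigned space (0..3, compared with
// icmp ule) where it = 2000 - 400*iv; 'lin' and 'a' both behave as linear
// variables whose common step is produced at runtime by the call to
// _Z7get_valv() (i.e. get_val()), with their finals propagated in the
// .omp.linear.pu block.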
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[LIN4:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[A5:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 4
// CHECK5-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 4
// CHECK5-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
// CHECK5-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK5-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK5-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK5-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK5-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
// CHECK5-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5:       cond.true:
// CHECK5-NEXT:    br label [[COND_END:%.*]]
// CHECK5:       cond.false:
// CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    br label [[COND_END]]
// CHECK5:       cond.end:
// CHECK5-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK5-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK5-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5:       omp.inner.for.cond:
// CHECK5-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK5-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5:       omp.inner.for.body:
// CHECK5-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK5-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK5-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !29
// CHECK5-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
// CHECK5-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK5-NEXT:    [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
// CHECK5-NEXT:    [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
// CHECK5-NEXT:    store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !29
// CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !29
// CHECK5-NEXT:    [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
// CHECK5-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK5-NEXT:    [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
// CHECK5-NEXT:    [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
// CHECK5-NEXT:    store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !29
// CHECK5-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !29
// CHECK5-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
// CHECK5-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
// CHECK5-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
// CHECK5-NEXT:    store i16 [[CONV16]], i16* [[CONV]], align 2, !llvm.access.group !29
// CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5:       omp.body.continue:
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5:       omp.inner.for.inc:
// CHECK5-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    [[ADD17:%.*]] = add i64 [[TMP17]], 1
// CHECK5-NEXT:    store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
// CHECK5:       omp.inner.for.end:
// CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5:       omp.loop.exit:
// CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK5-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5:       .omp.final.then:
// CHECK5-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK5:       .omp.final.done:
// CHECK5-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK5-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK5:       .omp.linear.pu:
// CHECK5-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4
// CHECK5-NEXT:    store i32 [[TMP22]], i32* [[CONV1]], align 4
// CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A5]], align 4
// CHECK5-NEXT:    store i32 [[TMP23]], i32* [[CONV2]], align 4
// CHECK5-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK5:       .omp.linear.pu.done:
// CHECK5-NEXT:    ret void
//
//
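// Host entry for line 116: the i32 'a' and i16 'aa' are each widened through
// an i64 slot before the fork, matching the two-argument map set
// (@.offload_sizes.5 / @.offload_maptypes.6) checked in _Z3fooi above.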
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
// CHECK5-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
// CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK5-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK5-NEXT:    ret void
//
//
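// Four statically scheduled iterations over an i16 induction variable:
// it = trunc(6 + 4*iv), with the final store it = 22 (6 + 4*4) guarded by
// the is-last flag.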
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[TMP:%.*]] = alloca i16, align 2
// CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[IT:%.*]] = alloca i16, align 2
// CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5:       cond.true:
// CHECK5-NEXT:    br label [[COND_END:%.*]]
// CHECK5:       cond.false:
// CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    br label [[COND_END]]
// CHECK5:       cond.end:
// CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5:       omp.inner.for.cond:
// CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
// CHECK5-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK5-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5:       omp.inner.for.body:
// CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK5-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK5-NEXT:    store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !32
// CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !32
// CHECK5-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK5-NEXT:    store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group !32
// CHECK5-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !32
// CHECK5-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK5-NEXT:    [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK5-NEXT:    [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK5-NEXT:    store i16 [[CONV7]], i16* [[CONV1]], align 2, !llvm.access.group !32
// CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5:       omp.body.continue:
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5:       omp.inner.for.inc:
// CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK5-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK5-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
// CHECK5:       omp.inner.for.end:
// CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5:       omp.loop.exit:
// CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK5-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5:       .omp.final.then:
// CHECK5-NEXT:    store i16 22, i16* [[IT]], align 2
// CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK5:       .omp.final.done:
// CHECK5-NEXT:    ret void
//
//
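// Host entry for the large region at line 140: ten captures, including two
// VLA extents passed as plain i64 values, array and pointer captures, and a
// precomputed value in DOTCAPTURE_EXPR_ that the outlined body consumes as
// its static-schedule chunk size.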
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
// CHECK5-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK5-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK5-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK5-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK5-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK5-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK5-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK5-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK5-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK5-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK5-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK5-NEXT:    [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP8]], i32* [[CONV6]], align 4
// CHECK5-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 4
// CHECK5-NEXT:    [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK5-NEXT:    store i32 [[TMP10]], i32* [[CONV7]], align 4
// CHECK5-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
// CHECK5-NEXT:    ret void
//
//
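// Schedule kind 33 below is kmp_sch_static_chunked, with the chunk loaded at
// runtime from the captured expression, so the body is wrapped in an
// omp.dispatch loop that advances lb/ub by the stride between chunks; the i8
// induction variable runs it = trunc(122 - iv) over 26 iterations (ub 25),
// with the final store it = 96 (122 - 26).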
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK5-NEXT:  entry:
// CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK5-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK5-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK5-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK5-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[TMP:%.*]] = alloca i8, align 1
// CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT:    [[IT:%.*]] = alloca i8, align 1
// CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK5-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK5-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK5-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK5-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK5-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK5-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK5-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 4
// CHECK5-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK5-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK5-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
// CHECK5:       omp.dispatch.cond:
// CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5:       cond.true:
// CHECK5-NEXT:    br label [[COND_END:%.*]]
// CHECK5:       cond.false:
// CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    br label [[COND_END]]
// CHECK5:       cond.end:
// CHECK5-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK5-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK5-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK5:       omp.dispatch.body:
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5:       omp.inner.for.cond:
// CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK5-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5:       omp.inner.for.body:
// CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK5-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK5-NEXT:    [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
// CHECK5-NEXT:    store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !35
// CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK5-NEXT:    store i32 [[ADD]], i32* [[CONV]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK5-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[CONV9:%.*]] = fpext float [[TMP20]] to double
// CHECK5-NEXT:    [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK5-NEXT:    [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK5-NEXT:    store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK5-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[CONV13:%.*]] = fpext float [[TMP21]] to double
// CHECK5-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK5-NEXT:    [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK5-NEXT:    store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK5-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK5-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK5-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK5-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
// CHECK5-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK5-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK5-NEXT:    store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK5-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK5-NEXT:    store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK5-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
// CHECK5-NEXT:    [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK5-NEXT:    [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK5-NEXT:    store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !35
// CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5:       omp.body.continue:
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5:       omp.inner.for.inc:
// CHECK5-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK5-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
// CHECK5:       omp.inner.for.end:
// CHECK5-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
// CHECK5:       omp.dispatch.inc:
// CHECK5-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT:    [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK5-NEXT:    store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT:    [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK5-NEXT:    store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT:    br label [[OMP_DISPATCH_COND]]
// CHECK5:       omp.dispatch.end:
// CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK5-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK5-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5:       .omp.final.then:
// CHECK5-NEXT:    store i8 96, i8* [[IT]], align 1
// CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK5:       .omp.final.done:
// CHECK5-NEXT:    ret void
//
//
7407 // CHECK5-LABEL: define {{[^@]+}}@_Z3bari
7408 // CHECK5-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
7409 // CHECK5-NEXT:  entry:
7410 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
7411 // CHECK5-NEXT:    [[A:%.*]] = alloca i32, align 4
7412 // CHECK5-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
7413 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
7414 // CHECK5-NEXT:    store i32 0, i32* [[A]], align 4
7415 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
7416 // CHECK5-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
7417 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
7418 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
7419 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
7420 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
7421 // CHECK5-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]])
7422 // CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
7423 // CHECK5-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
7424 // CHECK5-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
7425 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
7426 // CHECK5-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
7427 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
7428 // CHECK5-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
7429 // CHECK5-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
7430 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
7431 // CHECK5-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
7432 // CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
7433 // CHECK5-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
7434 // CHECK5-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
7435 // CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
7436 // CHECK5-NEXT:    ret i32 [[TMP8]]
7437 //
7438 //
7439 // CHECK5-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
7440 // CHECK5-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
7441 // CHECK5-NEXT:  entry:
7442 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
7443 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
7444 // CHECK5-NEXT:    [[B:%.*]] = alloca i32, align 4
7445 // CHECK5-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
7446 // CHECK5-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
7447 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
7448 // CHECK5-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
7449 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
7450 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8
7451 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8
7452 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8
7453 // CHECK5-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 8
7454 // CHECK5-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
7455 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
7456 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
7457 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
7458 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
7459 // CHECK5-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
7460 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
7461 // CHECK5-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
7462 // CHECK5-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
7463 // CHECK5-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
7464 // CHECK5-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
7465 // CHECK5-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
7466 // CHECK5-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
7467 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
7468 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
7469 // CHECK5-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
7470 // CHECK5-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
7471 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B]], align 4
7472 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
7473 // CHECK5-NEXT:    store i32 [[TMP6]], i32* [[CONV]], align 4
7474 // CHECK5-NEXT:    [[TMP7:%.*]] = load i64, i64* [[B_CASTED]], align 8
7475 // CHECK5-NEXT:    [[TMP8:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
7476 // CHECK5-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
7477 // CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
7478 // CHECK5-NEXT:    [[FROMBOOL3:%.*]] = zext i1 [[TOBOOL]] to i8
7479 // CHECK5-NEXT:    store i8 [[FROMBOOL3]], i8* [[CONV2]], align 1
7480 // CHECK5-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
7481 // CHECK5-NEXT:    [[TMP10:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
7482 // CHECK5-NEXT:    [[TOBOOL4:%.*]] = trunc i8 [[TMP10]] to i1
7483 // CHECK5-NEXT:    br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
7484 // CHECK5:       omp_if.then:
7485 // CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
7486 // CHECK5-NEXT:    [[TMP11:%.*]] = mul nuw i64 2, [[TMP2]]
7487 // CHECK5-NEXT:    [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
7488 // CHECK5-NEXT:    [[TMP13:%.*]] = bitcast [6 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
7489 // CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP13]], i8* align 8 bitcast ([6 x i64]* @.offload_sizes.11 to i8*), i64 48, i1 false)
7490 // CHECK5-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7491 // CHECK5-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to %struct.S1**
7492 // CHECK5-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP15]], align 8
7493 // CHECK5-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7494 // CHECK5-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to double**
7495 // CHECK5-NEXT:    store double* [[A]], double** [[TMP17]], align 8
7496 // CHECK5-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
7497 // CHECK5-NEXT:    store i8* null, i8** [[TMP18]], align 8
7498 // CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
7499 // CHECK5-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
7500 // CHECK5-NEXT:    store i64 [[TMP7]], i64* [[TMP20]], align 8
7501 // CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
7502 // CHECK5-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
7503 // CHECK5-NEXT:    store i64 [[TMP7]], i64* [[TMP22]], align 8
7504 // CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
7505 // CHECK5-NEXT:    store i8* null, i8** [[TMP23]], align 8
7506 // CHECK5-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
7507 // CHECK5-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
7508 // CHECK5-NEXT:    store i64 2, i64* [[TMP25]], align 8
7509 // CHECK5-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
7510 // CHECK5-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
7511 // CHECK5-NEXT:    store i64 2, i64* [[TMP27]], align 8
7512 // CHECK5-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
7513 // CHECK5-NEXT:    store i8* null, i8** [[TMP28]], align 8
7514 // CHECK5-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
7515 // CHECK5-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64*
7516 // CHECK5-NEXT:    store i64 [[TMP2]], i64* [[TMP30]], align 8
7517 // CHECK5-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
7518 // CHECK5-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
7519 // CHECK5-NEXT:    store i64 [[TMP2]], i64* [[TMP32]], align 8
7520 // CHECK5-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
7521 // CHECK5-NEXT:    store i8* null, i8** [[TMP33]], align 8
7522 // CHECK5-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
7523 // CHECK5-NEXT:    [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
7524 // CHECK5-NEXT:    store i16* [[VLA]], i16** [[TMP35]], align 8
7525 // CHECK5-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
7526 // CHECK5-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
7527 // CHECK5-NEXT:    store i16* [[VLA]], i16** [[TMP37]], align 8
7528 // CHECK5-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
7529 // CHECK5-NEXT:    store i64 [[TMP12]], i64* [[TMP38]], align 8
7530 // CHECK5-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
7531 // CHECK5-NEXT:    store i8* null, i8** [[TMP39]], align 8
7532 // CHECK5-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
7533 // CHECK5-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64*
7534 // CHECK5-NEXT:    store i64 [[TMP9]], i64* [[TMP41]], align 8
7535 // CHECK5-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
7536 // CHECK5-NEXT:    [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64*
7537 // CHECK5-NEXT:    store i64 [[TMP9]], i64* [[TMP43]], align 8
7538 // CHECK5-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
7539 // CHECK5-NEXT:    store i8* null, i8** [[TMP44]], align 8
7540 // CHECK5-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7541 // CHECK5-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7542 // CHECK5-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
7543 // CHECK5-NEXT:    [[TMP48:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
7544 // CHECK5-NEXT:    [[TOBOOL5:%.*]] = trunc i8 [[TMP48]] to i1
7545 // CHECK5-NEXT:    [[TMP49:%.*]] = select i1 [[TOBOOL5]], i32 0, i32 1
7546 // CHECK5-NEXT:    [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP45]], i8** [[TMP46]], i64* [[TMP47]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP49]])
7547 // CHECK5-NEXT:    [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0
7548 // CHECK5-NEXT:    br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
7549 // CHECK5:       omp_offload.failed:
7550 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]]
7551 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT]]
7552 // CHECK5:       omp_offload.cont:
7553 // CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
7554 // CHECK5:       omp_if.else:
7555 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]]
7556 // CHECK5-NEXT:    br label [[OMP_IF_END]]
7557 // CHECK5:       omp_if.end:
7558 // CHECK5-NEXT:    [[TMP52:%.*]] = mul nsw i64 1, [[TMP2]]
7559 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP52]]
7560 // CHECK5-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
7561 // CHECK5-NEXT:    [[TMP53:%.*]] = load i16, i16* [[ARRAYIDX6]], align 2
7562 // CHECK5-NEXT:    [[CONV7:%.*]] = sext i16 [[TMP53]] to i32
7563 // CHECK5-NEXT:    [[TMP54:%.*]] = load i32, i32* [[B]], align 4
7564 // CHECK5-NEXT:    [[ADD8:%.*]] = add nsw i32 [[CONV7]], [[TMP54]]
7565 // CHECK5-NEXT:    [[TMP55:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
7566 // CHECK5-NEXT:    call void @llvm.stackrestore(i8* [[TMP55]])
7567 // CHECK5-NEXT:    ret i32 [[ADD8]]
7568 //
7569 //
7570 // CHECK5-LABEL: define {{[^@]+}}@_ZL7fstatici
7571 // CHECK5-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
7572 // CHECK5-NEXT:  entry:
7573 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
7574 // CHECK5-NEXT:    [[A:%.*]] = alloca i32, align 4
7575 // CHECK5-NEXT:    [[AA:%.*]] = alloca i16, align 2
7576 // CHECK5-NEXT:    [[AAA:%.*]] = alloca i8, align 1
7577 // CHECK5-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
7578 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
7579 // CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
7580 // CHECK5-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
7581 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
7582 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
7583 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
7584 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
7585 // CHECK5-NEXT:    store i32 0, i32* [[A]], align 4
7586 // CHECK5-NEXT:    store i16 0, i16* [[AA]], align 2
7587 // CHECK5-NEXT:    store i8 0, i8* [[AAA]], align 1
7588 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
7589 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
7590 // CHECK5-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
7591 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
7592 // CHECK5-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
7593 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
7594 // CHECK5-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
7595 // CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
7596 // CHECK5-NEXT:    [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
7597 // CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
7598 // CHECK5-NEXT:    store i8 [[TMP4]], i8* [[CONV2]], align 1
7599 // CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
7600 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
7601 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
7602 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
7603 // CHECK5:       omp_if.then:
7604 // CHECK5-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7605 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
7606 // CHECK5-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
7607 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7608 // CHECK5-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
7609 // CHECK5-NEXT:    store i64 [[TMP1]], i64* [[TMP10]], align 8
7610 // CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
7611 // CHECK5-NEXT:    store i8* null, i8** [[TMP11]], align 8
7612 // CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
7613 // CHECK5-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
7614 // CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
7615 // CHECK5-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
7616 // CHECK5-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
7617 // CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP15]], align 8
7618 // CHECK5-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
7619 // CHECK5-NEXT:    store i8* null, i8** [[TMP16]], align 8
7620 // CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
7621 // CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
7622 // CHECK5-NEXT:    store i64 [[TMP5]], i64* [[TMP18]], align 8
7623 // CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
7624 // CHECK5-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
7625 // CHECK5-NEXT:    store i64 [[TMP5]], i64* [[TMP20]], align 8
7626 // CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
7627 // CHECK5-NEXT:    store i8* null, i8** [[TMP21]], align 8
7628 // CHECK5-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
7629 // CHECK5-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
7630 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
7631 // CHECK5-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
7632 // CHECK5-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
7633 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
7634 // CHECK5-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
7635 // CHECK5-NEXT:    store i8* null, i8** [[TMP26]], align 8
7636 // CHECK5-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7637 // CHECK5-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7638 // CHECK5-NEXT:    [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
7639 // CHECK5-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
7640 // CHECK5-NEXT:    br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
7641 // CHECK5:       omp_offload.failed:
7642 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
7643 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT]]
7644 // CHECK5:       omp_offload.cont:
7645 // CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
7646 // CHECK5:       omp_if.else:
7647 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
7648 // CHECK5-NEXT:    br label [[OMP_IF_END]]
7649 // CHECK5:       omp_if.end:
7650 // CHECK5-NEXT:    [[TMP31:%.*]] = load i32, i32* [[A]], align 4
7651 // CHECK5-NEXT:    ret i32 [[TMP31]]
7652 //
7653 //
7654 // CHECK5-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
7655 // CHECK5-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
7656 // CHECK5-NEXT:  entry:
7657 // CHECK5-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
7658 // CHECK5-NEXT:    [[A:%.*]] = alloca i32, align 4
7659 // CHECK5-NEXT:    [[AA:%.*]] = alloca i16, align 2
7660 // CHECK5-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
7661 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
7662 // CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
7663 // CHECK5-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
7664 // CHECK5-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
7665 // CHECK5-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
7666 // CHECK5-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
7667 // CHECK5-NEXT:    store i32 0, i32* [[A]], align 4
7668 // CHECK5-NEXT:    store i16 0, i16* [[AA]], align 2
7669 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
7670 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
7671 // CHECK5-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
7672 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
7673 // CHECK5-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
7674 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
7675 // CHECK5-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
7676 // CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
7677 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
7678 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
7679 // CHECK5-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
7680 // CHECK5:       omp_if.then:
7681 // CHECK5-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7682 // CHECK5-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
7683 // CHECK5-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
7684 // CHECK5-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7685 // CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
7686 // CHECK5-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
7687 // CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
7688 // CHECK5-NEXT:    store i8* null, i8** [[TMP9]], align 8
7689 // CHECK5-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
7690 // CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
7691 // CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP11]], align 8
7692 // CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
7693 // CHECK5-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
7694 // CHECK5-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
7695 // CHECK5-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
7696 // CHECK5-NEXT:    store i8* null, i8** [[TMP14]], align 8
7697 // CHECK5-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
7698 // CHECK5-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
7699 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
7700 // CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
7701 // CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
7702 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
7703 // CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
7704 // CHECK5-NEXT:    store i8* null, i8** [[TMP19]], align 8
7705 // CHECK5-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7706 // CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7707 // CHECK5-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
7708 // CHECK5-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
7709 // CHECK5-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
7710 // CHECK5:       omp_offload.failed:
7711 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
7712 // CHECK5-NEXT:    br label [[OMP_OFFLOAD_CONT]]
7713 // CHECK5:       omp_offload.cont:
7714 // CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
7715 // CHECK5:       omp_if.else:
7716 // CHECK5-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
7717 // CHECK5-NEXT:    br label [[OMP_IF_END]]
7718 // CHECK5:       omp_if.end:
7719 // CHECK5-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
7720 // CHECK5-NEXT:    ret i32 [[TMP24]]
7721 //
7722 //
7723 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
7724 // CHECK5-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
7725 // CHECK5-NEXT:  entry:
7726 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
7727 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
7728 // CHECK5-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
7729 // CHECK5-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
7730 // CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
7731 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
7732 // CHECK5-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
7733 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
7734 // CHECK5-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
7735 // CHECK5-NEXT:    [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
7736 // CHECK5-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
7737 // CHECK5-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
7738 // CHECK5-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
7739 // CHECK5-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
7740 // CHECK5-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
7741 // CHECK5-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
7742 // CHECK5-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
7743 // CHECK5-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
7744 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
7745 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
7746 // CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
7747 // CHECK5-NEXT:    [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 8
7748 // CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
7749 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[CONV]], align 4
7750 // CHECK5-NEXT:    [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i32*
7751 // CHECK5-NEXT:    store i32 [[TMP5]], i32* [[CONV4]], align 4
7752 // CHECK5-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
7753 // CHECK5-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 1
7754 // CHECK5-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
7755 // CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
7756 // CHECK5-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
7757 // CHECK5-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV5]], align 1
7758 // CHECK5-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
7759 // CHECK5-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV3]], align 1
7760 // CHECK5-NEXT:    [[TOBOOL6:%.*]] = trunc i8 [[TMP9]] to i1
7761 // CHECK5-NEXT:    br i1 [[TOBOOL6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
7762 // CHECK5:       omp_if.then:
7763 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]])
7764 // CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
7765 // CHECK5:       omp_if.else:
7766 // CHECK5-NEXT:    call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
7767 // CHECK5-NEXT:    store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
7768 // CHECK5-NEXT:    store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
7769 // CHECK5-NEXT:    call void @.omp_outlined..10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) #[[ATTR4]]
7770 // CHECK5-NEXT:    call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
7771 // CHECK5-NEXT:    br label [[OMP_IF_END]]
7772 // CHECK5:       omp_if.end:
7773 // CHECK5-NEXT:    ret void
7774 //
7775 //
7776 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..10
7777 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
7778 // CHECK5-NEXT:  entry:
7779 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7780 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7781 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
7782 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
7783 // CHECK5-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
7784 // CHECK5-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
7785 // CHECK5-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
7786 // CHECK5-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
7787 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
7788 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i64, align 8
7789 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
7790 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
7791 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
7792 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7793 // CHECK5-NEXT:    [[IT:%.*]] = alloca i64, align 8
7794 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7795 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7796 // CHECK5-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
7797 // CHECK5-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
7798 // CHECK5-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
7799 // CHECK5-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
7800 // CHECK5-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
7801 // CHECK5-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
7802 // CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
7803 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
7804 // CHECK5-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
7805 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
7806 // CHECK5-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
7807 // CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
7808 // CHECK5-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
7809 // CHECK5-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
7810 // CHECK5-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
7811 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7812 // CHECK5-NEXT:    [[TMP4:%.*]] = load i8, i8* [[CONV3]], align 1
7813 // CHECK5-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1
7814 // CHECK5-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
7815 // CHECK5:       omp_if.then:
7816 // CHECK5-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7817 // CHECK5-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
7818 // CHECK5-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
7819 // CHECK5-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
7820 // CHECK5-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3
7821 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7822 // CHECK5:       cond.true:
7823 // CHECK5-NEXT:    br label [[COND_END:%.*]]
7824 // CHECK5:       cond.false:
7825 // CHECK5-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
7826 // CHECK5-NEXT:    br label [[COND_END]]
7827 // CHECK5:       cond.end:
7828 // CHECK5-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
7829 // CHECK5-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
7830 // CHECK5-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
7831 // CHECK5-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8
7832 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7833 // CHECK5:       omp.inner.for.cond:
7834 // CHECK5-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
7835 // CHECK5-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !38
7836 // CHECK5-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]]
7837 // CHECK5-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7838 // CHECK5:       omp.inner.for.body:
7839 // CHECK5-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
7840 // CHECK5-NEXT:    [[MUL:%.*]] = mul i64 [[TMP12]], 400
7841 // CHECK5-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
7842 // CHECK5-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !38
7843 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !38
7844 // CHECK5-NEXT:    [[CONV5:%.*]] = sitofp i32 [[TMP13]] to double
7845 // CHECK5-NEXT:    [[ADD:%.*]] = fadd double [[CONV5]], 1.500000e+00
7846 // CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
7847 // CHECK5-NEXT:    store double [[ADD]], double* [[A]], align 8, !nontemporal !39, !llvm.access.group !38
7848 // CHECK5-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
7849 // CHECK5-NEXT:    [[TMP14:%.*]] = load double, double* [[A6]], align 8, !nontemporal !39, !llvm.access.group !38
7850 // CHECK5-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
7851 // CHECK5-NEXT:    store double [[INC]], double* [[A6]], align 8, !nontemporal !39, !llvm.access.group !38
7852 // CHECK5-NEXT:    [[CONV7:%.*]] = fptosi double [[INC]] to i16
7853 // CHECK5-NEXT:    [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]]
7854 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP15]]
7855 // CHECK5-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
7856 // CHECK5-NEXT:    store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !38
7857 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7858 // CHECK5:       omp.body.continue:
7859 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7860 // CHECK5:       omp.inner.for.inc:
7861 // CHECK5-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
7862 // CHECK5-NEXT:    [[ADD9:%.*]] = add i64 [[TMP16]], 1
7863 // CHECK5-NEXT:    store i64 [[ADD9]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
7864 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
7865 // CHECK5:       omp.inner.for.end:
7866 // CHECK5-NEXT:    br label [[OMP_IF_END:%.*]]
7867 // CHECK5:       omp_if.else:
7868 // CHECK5-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7869 // CHECK5-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
7870 // CHECK5-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
7871 // CHECK5-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
7872 // CHECK5-NEXT:    [[CMP10:%.*]] = icmp ugt i64 [[TMP19]], 3
7873 // CHECK5-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
7874 // CHECK5:       cond.true11:
7875 // CHECK5-NEXT:    br label [[COND_END13:%.*]]
7876 // CHECK5:       cond.false12:
7877 // CHECK5-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
7878 // CHECK5-NEXT:    br label [[COND_END13]]
7879 // CHECK5:       cond.end13:
7880 // CHECK5-NEXT:    [[COND14:%.*]] = phi i64 [ 3, [[COND_TRUE11]] ], [ [[TMP20]], [[COND_FALSE12]] ]
7881 // CHECK5-NEXT:    store i64 [[COND14]], i64* [[DOTOMP_UB]], align 8
7882 // CHECK5-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
7883 // CHECK5-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8
7884 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND15:%.*]]
7885 // CHECK5:       omp.inner.for.cond15:
7886 // CHECK5-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
7887 // CHECK5-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
7888 // CHECK5-NEXT:    [[CMP16:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
7889 // CHECK5-NEXT:    br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY17:%.*]], label [[OMP_INNER_FOR_END31:%.*]]
7890 // CHECK5:       omp.inner.for.body17:
7891 // CHECK5-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
7892 // CHECK5-NEXT:    [[MUL18:%.*]] = mul i64 [[TMP24]], 400
7893 // CHECK5-NEXT:    [[SUB19:%.*]] = sub i64 2000, [[MUL18]]
7894 // CHECK5-NEXT:    store i64 [[SUB19]], i64* [[IT]], align 8
7895 // CHECK5-NEXT:    [[TMP25:%.*]] = load i32, i32* [[CONV]], align 4
7896 // CHECK5-NEXT:    [[CONV20:%.*]] = sitofp i32 [[TMP25]] to double
7897 // CHECK5-NEXT:    [[ADD21:%.*]] = fadd double [[CONV20]], 1.500000e+00
7898 // CHECK5-NEXT:    [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
7899 // CHECK5-NEXT:    store double [[ADD21]], double* [[A22]], align 8
7900 // CHECK5-NEXT:    [[A23:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
7901 // CHECK5-NEXT:    [[TMP26:%.*]] = load double, double* [[A23]], align 8
7902 // CHECK5-NEXT:    [[INC24:%.*]] = fadd double [[TMP26]], 1.000000e+00
7903 // CHECK5-NEXT:    store double [[INC24]], double* [[A23]], align 8
7904 // CHECK5-NEXT:    [[CONV25:%.*]] = fptosi double [[INC24]] to i16
7905 // CHECK5-NEXT:    [[TMP27:%.*]] = mul nsw i64 1, [[TMP2]]
7906 // CHECK5-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP27]]
7907 // CHECK5-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i64 1
7908 // CHECK5-NEXT:    store i16 [[CONV25]], i16* [[ARRAYIDX27]], align 2
7909 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE28:%.*]]
7910 // CHECK5:       omp.body.continue28:
7911 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC29:%.*]]
7912 // CHECK5:       omp.inner.for.inc29:
7913 // CHECK5-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
7914 // CHECK5-NEXT:    [[ADD30:%.*]] = add i64 [[TMP28]], 1
7915 // CHECK5-NEXT:    store i64 [[ADD30]], i64* [[DOTOMP_IV]], align 8
7916 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND15]], !llvm.loop [[LOOP42:![0-9]+]]
7917 // CHECK5:       omp.inner.for.end31:
7918 // CHECK5-NEXT:    br label [[OMP_IF_END]]
7919 // CHECK5:       omp_if.end:
7920 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7921 // CHECK5:       omp.loop.exit:
7922 // CHECK5-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7923 // CHECK5-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
7924 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
7925 // CHECK5-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7926 // CHECK5-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
7927 // CHECK5-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7928 // CHECK5:       .omp.final.then:
7929 // CHECK5-NEXT:    store i64 400, i64* [[IT]], align 8
7930 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7931 // CHECK5:       .omp.final.done:
7932 // CHECK5-NEXT:    ret void
7933 //
7934 //
7935 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
7936 // CHECK5-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
7937 // CHECK5-NEXT:  entry:
7938 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
7939 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
7940 // CHECK5-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
7941 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
7942 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
7943 // CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
7944 // CHECK5-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
7945 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
7946 // CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
7947 // CHECK5-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
7948 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
7949 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
7950 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
7951 // CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
7952 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
7953 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
7954 // CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
7955 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[CONV3]], align 4
7956 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
7957 // CHECK5-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
7958 // CHECK5-NEXT:    [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
7959 // CHECK5-NEXT:    store i16 [[TMP3]], i16* [[CONV4]], align 2
7960 // CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
7961 // CHECK5-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 1
7962 // CHECK5-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
7963 // CHECK5-NEXT:    store i8 [[TMP5]], i8* [[CONV5]], align 1
7964 // CHECK5-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
7965 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
7966 // CHECK5-NEXT:    ret void
7967 //
7968 //
7969 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..13
7970 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
7971 // CHECK5-NEXT:  entry:
7972 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7973 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7974 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
7975 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
7976 // CHECK5-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
7977 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
7978 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7979 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7980 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7981 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7982 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
7983 // CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
7984 // CHECK5-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
7985 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
7986 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
7987 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
7988 // CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
7989 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
7990 // CHECK5-NEXT:    ret void
7991 //
7992 //
7993 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
7994 // CHECK5-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
7995 // CHECK5-NEXT:  entry:
7996 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
7997 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
7998 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
7999 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
8000 // CHECK5-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
8001 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
8002 // CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
8003 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
8004 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
8005 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
8006 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
8007 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
8008 // CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
8009 // CHECK5-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
8010 // CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
8011 // CHECK5-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
8012 // CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
8013 // CHECK5-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
8014 // CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
8015 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
8016 // CHECK5-NEXT:    ret void
8017 //
8018 //
8019 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..16
8020 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
8021 // CHECK5-NEXT:  entry:
8022 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8023 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8024 // CHECK5-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
8025 // CHECK5-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
8026 // CHECK5-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
8027 // CHECK5-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
8028 // CHECK5-NEXT:    [[TMP:%.*]] = alloca i64, align 8
8029 // CHECK5-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
8030 // CHECK5-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
8031 // CHECK5-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
8032 // CHECK5-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8033 // CHECK5-NEXT:    [[I:%.*]] = alloca i64, align 8
8034 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8035 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8036 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
8037 // CHECK5-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
8038 // CHECK5-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
8039 // CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
8040 // CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
8041 // CHECK5-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
8042 // CHECK5-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
8043 // CHECK5-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
8044 // CHECK5-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
8045 // CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8046 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8047 // CHECK5-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
8048 // CHECK5-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
8049 // CHECK5-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
8050 // CHECK5-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
8051 // CHECK5-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8052 // CHECK5:       cond.true:
8053 // CHECK5-NEXT:    br label [[COND_END:%.*]]
8054 // CHECK5:       cond.false:
8055 // CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
8056 // CHECK5-NEXT:    br label [[COND_END]]
8057 // CHECK5:       cond.end:
8058 // CHECK5-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
8059 // CHECK5-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
8060 // CHECK5-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
8061 // CHECK5-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
8062 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8063 // CHECK5:       omp.inner.for.cond:
8064 // CHECK5-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44
8065 // CHECK5-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !44
8066 // CHECK5-NEXT:    [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
8067 // CHECK5-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8068 // CHECK5:       omp.inner.for.body:
8069 // CHECK5-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44
8070 // CHECK5-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
8071 // CHECK5-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
8072 // CHECK5-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !44
8073 // CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !44
8074 // CHECK5-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
8075 // CHECK5-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !44
8076 // CHECK5-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !44
8077 // CHECK5-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
8078 // CHECK5-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
8079 // CHECK5-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
8080 // CHECK5-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !44
8081 // CHECK5-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
8082 // CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
8083 // CHECK5-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
8084 // CHECK5-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
8085 // CHECK5-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8086 // CHECK5:       omp.body.continue:
8087 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8088 // CHECK5:       omp.inner.for.inc:
8089 // CHECK5-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44
8090 // CHECK5-NEXT:    [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
8091 // CHECK5-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44
8092 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
8093 // CHECK5:       omp.inner.for.end:
8094 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8095 // CHECK5:       omp.loop.exit:
8096 // CHECK5-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
8097 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8098 // CHECK5-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
8099 // CHECK5-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8100 // CHECK5:       .omp.final.then:
8101 // CHECK5-NEXT:    store i64 11, i64* [[I]], align 8
8102 // CHECK5-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8103 // CHECK5:       .omp.final.done:
8104 // CHECK5-NEXT:    ret void
8105 //
8106 //
8107 // CHECK5-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
8108 // CHECK5-SAME: () #[[ATTR8:[0-9]+]] {
8109 // CHECK5-NEXT:  entry:
8110 // CHECK5-NEXT:    call void @__tgt_register_requires(i64 1)
8111 // CHECK5-NEXT:    ret void
8112 //
8113 //
8114 // CHECK6-LABEL: define {{[^@]+}}@_Z7get_valv
8115 // CHECK6-SAME: () #[[ATTR0:[0-9]+]] {
8116 // CHECK6-NEXT:  entry:
8117 // CHECK6-NEXT:    ret i64 0
8118 //
8119 //
8120 // CHECK6-LABEL: define {{[^@]+}}@_Z3fooi
8121 // CHECK6-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
8122 // CHECK6-NEXT:  entry:
8123 // CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
8124 // CHECK6-NEXT:    [[A:%.*]] = alloca i32, align 4
8125 // CHECK6-NEXT:    [[AA:%.*]] = alloca i16, align 2
8126 // CHECK6-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
8127 // CHECK6-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
8128 // CHECK6-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
8129 // CHECK6-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
8130 // CHECK6-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
8131 // CHECK6-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
8132 // CHECK6-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
8133 // CHECK6-NEXT:    [[K:%.*]] = alloca i64, align 8
8134 // CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
8135 // CHECK6-NEXT:    [[K_CASTED:%.*]] = alloca i64, align 8
8136 // CHECK6-NEXT:    [[LIN:%.*]] = alloca i32, align 4
8137 // CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
8138 // CHECK6-NEXT:    [[LIN_CASTED:%.*]] = alloca i64, align 8
8139 // CHECK6-NEXT:    [[A_CASTED4:%.*]] = alloca i64, align 8
8140 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
8141 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
8142 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
8143 // CHECK6-NEXT:    [[A_CASTED6:%.*]] = alloca i64, align 8
8144 // CHECK6-NEXT:    [[AA_CASTED8:%.*]] = alloca i64, align 8
8145 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [2 x i8*], align 8
8146 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS11:%.*]] = alloca [2 x i8*], align 8
8147 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [2 x i8*], align 8
8148 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8149 // CHECK6-NEXT:    [[A_CASTED15:%.*]] = alloca i64, align 8
8150 // CHECK6-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
8151 // CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8
8152 // CHECK6-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8
8153 // CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8
8154 // CHECK6-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8
8155 // CHECK6-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
8156 // CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
8157 // CHECK6-NEXT:    store i32 0, i32* [[A]], align 4
8158 // CHECK6-NEXT:    store i16 0, i16* [[AA]], align 2
8159 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
8160 // CHECK6-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
8161 // CHECK6-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
8162 // CHECK6-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
8163 // CHECK6-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
8164 // CHECK6-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
8165 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
8166 // CHECK6-NEXT:    [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
8167 // CHECK6-NEXT:    [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
8168 // CHECK6-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
8169 // CHECK6-NEXT:    store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
8170 // CHECK6-NEXT:    [[TMP7:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
8171 // CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.kmp_task_t_with_privates*
8172 // CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP8]], i32 0, i32 0
8173 // CHECK6-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP7]])
8174 // CHECK6-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
8175 // CHECK6-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
8176 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[A]], align 4
8177 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
8178 // CHECK6-NEXT:    store i32 [[TMP11]], i32* [[CONV]], align 4
8179 // CHECK6-NEXT:    [[TMP12:%.*]] = load i64, i64* [[A_CASTED]], align 8
8180 // CHECK6-NEXT:    [[TMP13:%.*]] = load i64, i64* [[K]], align 8
8181 // CHECK6-NEXT:    store i64 [[TMP13]], i64* [[K_CASTED]], align 8
8182 // CHECK6-NEXT:    [[TMP14:%.*]] = load i64, i64* [[K_CASTED]], align 8
8183 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i64 [[TMP12]], i64 [[TMP14]]) #[[ATTR4:[0-9]+]]
8184 // CHECK6-NEXT:    store i32 12, i32* [[LIN]], align 4
8185 // CHECK6-NEXT:    [[TMP15:%.*]] = load i16, i16* [[AA]], align 2
8186 // CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
8187 // CHECK6-NEXT:    store i16 [[TMP15]], i16* [[CONV2]], align 2
8188 // CHECK6-NEXT:    [[TMP16:%.*]] = load i64, i64* [[AA_CASTED]], align 8
8189 // CHECK6-NEXT:    [[TMP17:%.*]] = load i32, i32* [[LIN]], align 4
8190 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
8191 // CHECK6-NEXT:    store i32 [[TMP17]], i32* [[CONV3]], align 4
8192 // CHECK6-NEXT:    [[TMP18:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
8193 // CHECK6-NEXT:    [[TMP19:%.*]] = load i32, i32* [[A]], align 4
8194 // CHECK6-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED4]] to i32*
8195 // CHECK6-NEXT:    store i32 [[TMP19]], i32* [[CONV5]], align 4
8196 // CHECK6-NEXT:    [[TMP20:%.*]] = load i64, i64* [[A_CASTED4]], align 8
8197 // CHECK6-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
8198 // CHECK6-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
8199 // CHECK6-NEXT:    store i64 [[TMP16]], i64* [[TMP22]], align 8
8200 // CHECK6-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
8201 // CHECK6-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64*
8202 // CHECK6-NEXT:    store i64 [[TMP16]], i64* [[TMP24]], align 8
8203 // CHECK6-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
8204 // CHECK6-NEXT:    store i8* null, i8** [[TMP25]], align 8
8205 // CHECK6-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
8206 // CHECK6-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
8207 // CHECK6-NEXT:    store i64 [[TMP18]], i64* [[TMP27]], align 8
8208 // CHECK6-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
8209 // CHECK6-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
8210 // CHECK6-NEXT:    store i64 [[TMP18]], i64* [[TMP29]], align 8
8211 // CHECK6-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
8212 // CHECK6-NEXT:    store i8* null, i8** [[TMP30]], align 8
8213 // CHECK6-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
8214 // CHECK6-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
8215 // CHECK6-NEXT:    store i64 [[TMP20]], i64* [[TMP32]], align 8
8216 // CHECK6-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
8217 // CHECK6-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64*
8218 // CHECK6-NEXT:    store i64 [[TMP20]], i64* [[TMP34]], align 8
8219 // CHECK6-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
8220 // CHECK6-NEXT:    store i8* null, i8** [[TMP35]], align 8
8221 // CHECK6-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
8222 // CHECK6-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
8223 // CHECK6-NEXT:    [[TMP38:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
8224 // CHECK6-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
8225 // CHECK6-NEXT:    br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
8226 // CHECK6:       omp_offload.failed:
8227 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]]) #[[ATTR4]]
8228 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT]]
8229 // CHECK6:       omp_offload.cont:
8230 // CHECK6-NEXT:    [[TMP40:%.*]] = load i32, i32* [[A]], align 4
8231 // CHECK6-NEXT:    [[CONV7:%.*]] = bitcast i64* [[A_CASTED6]] to i32*
8232 // CHECK6-NEXT:    store i32 [[TMP40]], i32* [[CONV7]], align 4
8233 // CHECK6-NEXT:    [[TMP41:%.*]] = load i64, i64* [[A_CASTED6]], align 8
8234 // CHECK6-NEXT:    [[TMP42:%.*]] = load i16, i16* [[AA]], align 2
8235 // CHECK6-NEXT:    [[CONV9:%.*]] = bitcast i64* [[AA_CASTED8]] to i16*
8236 // CHECK6-NEXT:    store i16 [[TMP42]], i16* [[CONV9]], align 2
8237 // CHECK6-NEXT:    [[TMP43:%.*]] = load i64, i64* [[AA_CASTED8]], align 8
8238 // CHECK6-NEXT:    [[TMP44:%.*]] = load i32, i32* [[N_ADDR]], align 4
8239 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP44]], 10
8240 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
8241 // CHECK6:       omp_if.then:
8242 // CHECK6-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
8243 // CHECK6-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64*
8244 // CHECK6-NEXT:    store i64 [[TMP41]], i64* [[TMP46]], align 8
8245 // CHECK6-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
8246 // CHECK6-NEXT:    [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i64*
8247 // CHECK6-NEXT:    store i64 [[TMP41]], i64* [[TMP48]], align 8
8248 // CHECK6-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
8249 // CHECK6-NEXT:    store i8* null, i8** [[TMP49]], align 8
8250 // CHECK6-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 1
8251 // CHECK6-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i64*
8252 // CHECK6-NEXT:    store i64 [[TMP43]], i64* [[TMP51]], align 8
8253 // CHECK6-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 1
8254 // CHECK6-NEXT:    [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64*
8255 // CHECK6-NEXT:    store i64 [[TMP43]], i64* [[TMP53]], align 8
8256 // CHECK6-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 1
8257 // CHECK6-NEXT:    store i8* null, i8** [[TMP54]], align 8
8258 // CHECK6-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
8259 // CHECK6-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
8260 // CHECK6-NEXT:    [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
8261 // CHECK6-NEXT:    [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0
8262 // CHECK6-NEXT:    br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
8263 // CHECK6:       omp_offload.failed13:
8264 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]]
8265 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT14]]
8266 // CHECK6:       omp_offload.cont14:
8267 // CHECK6-NEXT:    br label [[OMP_IF_END:%.*]]
8268 // CHECK6:       omp_if.else:
8269 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP41]], i64 [[TMP43]]) #[[ATTR4]]
8270 // CHECK6-NEXT:    br label [[OMP_IF_END]]
8271 // CHECK6:       omp_if.end:
8272 // CHECK6-NEXT:    [[TMP59:%.*]] = load i32, i32* [[A]], align 4
8273 // CHECK6-NEXT:    store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR_]], align 4
8274 // CHECK6-NEXT:    [[TMP60:%.*]] = load i32, i32* [[A]], align 4
8275 // CHECK6-NEXT:    [[CONV16:%.*]] = bitcast i64* [[A_CASTED15]] to i32*
8276 // CHECK6-NEXT:    store i32 [[TMP60]], i32* [[CONV16]], align 4
8277 // CHECK6-NEXT:    [[TMP61:%.*]] = load i64, i64* [[A_CASTED15]], align 8
8278 // CHECK6-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8279 // CHECK6-NEXT:    [[CONV17:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
8280 // CHECK6-NEXT:    store i32 [[TMP62]], i32* [[CONV17]], align 4
8281 // CHECK6-NEXT:    [[TMP63:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
8282 // CHECK6-NEXT:    [[TMP64:%.*]] = load i32, i32* [[N_ADDR]], align 4
8283 // CHECK6-NEXT:    [[CMP18:%.*]] = icmp sgt i32 [[TMP64]], 20
8284 // CHECK6-NEXT:    br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE25:%.*]]
8285 // CHECK6:       omp_if.then19:
8286 // CHECK6-NEXT:    [[TMP65:%.*]] = mul nuw i64 [[TMP2]], 4
8287 // CHECK6-NEXT:    [[TMP66:%.*]] = mul nuw i64 5, [[TMP5]]
8288 // CHECK6-NEXT:    [[TMP67:%.*]] = mul nuw i64 [[TMP66]], 8
8289 // CHECK6-NEXT:    [[TMP68:%.*]] = bitcast [10 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
8290 // CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP68]], i8* align 8 bitcast ([10 x i64]* @.offload_sizes.8 to i8*), i64 80, i1 false)
8291 // CHECK6-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
8292 // CHECK6-NEXT:    [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64*
8293 // CHECK6-NEXT:    store i64 [[TMP61]], i64* [[TMP70]], align 8
8294 // CHECK6-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
8295 // CHECK6-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
8296 // CHECK6-NEXT:    store i64 [[TMP61]], i64* [[TMP72]], align 8
8297 // CHECK6-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
8298 // CHECK6-NEXT:    store i8* null, i8** [[TMP73]], align 8
8299 // CHECK6-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
8300 // CHECK6-NEXT:    [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]**
8301 // CHECK6-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 8
8302 // CHECK6-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
8303 // CHECK6-NEXT:    [[TMP77:%.*]] = bitcast i8** [[TMP76]] to [10 x float]**
8304 // CHECK6-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP77]], align 8
8305 // CHECK6-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
8306 // CHECK6-NEXT:    store i8* null, i8** [[TMP78]], align 8
8307 // CHECK6-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
8308 // CHECK6-NEXT:    [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i64*
8309 // CHECK6-NEXT:    store i64 [[TMP2]], i64* [[TMP80]], align 8
8310 // CHECK6-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
8311 // CHECK6-NEXT:    [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i64*
8312 // CHECK6-NEXT:    store i64 [[TMP2]], i64* [[TMP82]], align 8
8313 // CHECK6-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
8314 // CHECK6-NEXT:    store i8* null, i8** [[TMP83]], align 8
8315 // CHECK6-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
8316 // CHECK6-NEXT:    [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float**
8317 // CHECK6-NEXT:    store float* [[VLA]], float** [[TMP85]], align 8
8318 // CHECK6-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
8319 // CHECK6-NEXT:    [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float**
8320 // CHECK6-NEXT:    store float* [[VLA]], float** [[TMP87]], align 8
8321 // CHECK6-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
8322 // CHECK6-NEXT:    store i64 [[TMP65]], i64* [[TMP88]], align 8
8323 // CHECK6-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
8324 // CHECK6-NEXT:    store i8* null, i8** [[TMP89]], align 8
8325 // CHECK6-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
8326 // CHECK6-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]**
8327 // CHECK6-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 8
8328 // CHECK6-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
8329 // CHECK6-NEXT:    [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]**
8330 // CHECK6-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 8
8331 // CHECK6-NEXT:    [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
8332 // CHECK6-NEXT:    store i8* null, i8** [[TMP94]], align 8
8333 // CHECK6-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5
8334 // CHECK6-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i64*
8335 // CHECK6-NEXT:    store i64 5, i64* [[TMP96]], align 8
8336 // CHECK6-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5
8337 // CHECK6-NEXT:    [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64*
8338 // CHECK6-NEXT:    store i64 5, i64* [[TMP98]], align 8
8339 // CHECK6-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5
8340 // CHECK6-NEXT:    store i8* null, i8** [[TMP99]], align 8
8341 // CHECK6-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6
8342 // CHECK6-NEXT:    [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64*
8343 // CHECK6-NEXT:    store i64 [[TMP5]], i64* [[TMP101]], align 8
8344 // CHECK6-NEXT:    [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6
8345 // CHECK6-NEXT:    [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i64*
8346 // CHECK6-NEXT:    store i64 [[TMP5]], i64* [[TMP103]], align 8
8347 // CHECK6-NEXT:    [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6
8348 // CHECK6-NEXT:    store i8* null, i8** [[TMP104]], align 8
8349 // CHECK6-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7
8350 // CHECK6-NEXT:    [[TMP106:%.*]] = bitcast i8** [[TMP105]] to double**
8351 // CHECK6-NEXT:    store double* [[VLA1]], double** [[TMP106]], align 8
8352 // CHECK6-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7
8353 // CHECK6-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to double**
8354 // CHECK6-NEXT:    store double* [[VLA1]], double** [[TMP108]], align 8
8355 // CHECK6-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
8356 // CHECK6-NEXT:    store i64 [[TMP67]], i64* [[TMP109]], align 8
8357 // CHECK6-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7
8358 // CHECK6-NEXT:    store i8* null, i8** [[TMP110]], align 8
8359 // CHECK6-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8
8360 // CHECK6-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to %struct.TT**
8361 // CHECK6-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP112]], align 8
8362 // CHECK6-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8
8363 // CHECK6-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to %struct.TT**
8364 // CHECK6-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP114]], align 8
8365 // CHECK6-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8
8366 // CHECK6-NEXT:    store i8* null, i8** [[TMP115]], align 8
8367 // CHECK6-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9
8368 // CHECK6-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i64*
8369 // CHECK6-NEXT:    store i64 [[TMP63]], i64* [[TMP117]], align 8
8370 // CHECK6-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9
8371 // CHECK6-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i64*
8372 // CHECK6-NEXT:    store i64 [[TMP63]], i64* [[TMP119]], align 8
8373 // CHECK6-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9
8374 // CHECK6-NEXT:    store i8* null, i8** [[TMP120]], align 8
8375 // CHECK6-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
8376 // CHECK6-NEXT:    [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
8377 // CHECK6-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
8378 // CHECK6-NEXT:    [[TMP124:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP121]], i8** [[TMP122]], i64* [[TMP123]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
8379 // CHECK6-NEXT:    [[TMP125:%.*]] = icmp ne i32 [[TMP124]], 0
8380 // CHECK6-NEXT:    br i1 [[TMP125]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]]
8381 // CHECK6:       omp_offload.failed23:
8382 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]]
8383 // CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT24]]
8384 // CHECK6:       omp_offload.cont24:
8385 // CHECK6-NEXT:    br label [[OMP_IF_END26:%.*]]
8386 // CHECK6:       omp_if.else25:
8387 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]]
8388 // CHECK6-NEXT:    br label [[OMP_IF_END26]]
8389 // CHECK6:       omp_if.end26:
8390 // CHECK6-NEXT:    [[TMP126:%.*]] = load i32, i32* [[A]], align 4
8391 // CHECK6-NEXT:    [[TMP127:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
8392 // CHECK6-NEXT:    call void @llvm.stackrestore(i8* [[TMP127]])
8393 // CHECK6-NEXT:    ret i32 [[TMP126]]
8394 //
8395 //
8396 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
8397 // CHECK6-SAME: () #[[ATTR2:[0-9]+]] {
8398 // CHECK6-NEXT:  entry:
8399 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
8400 // CHECK6-NEXT:    ret void
8401 //
8402 //
8403 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined.
8404 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
8405 // CHECK6-NEXT:  entry:
8406 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8407 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8408 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8409 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8410 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8411 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8412 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8413 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8414 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
8415 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8416 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8417 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8418 // CHECK6-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
8419 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8420 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8421 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8422 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
8423 // CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8424 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8425 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
8426 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8427 // CHECK6:       cond.true:
8428 // CHECK6-NEXT:    br label [[COND_END:%.*]]
8429 // CHECK6:       cond.false:
8430 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8431 // CHECK6-NEXT:    br label [[COND_END]]
8432 // CHECK6:       cond.end:
8433 // CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
8434 // CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
8435 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8436 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
8437 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8438 // CHECK6:       omp.inner.for.cond:
8439 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
8440 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
8441 // CHECK6-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
8442 // CHECK6-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8443 // CHECK6:       omp.inner.for.body:
8444 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
8445 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
8446 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
8447 // CHECK6-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10
8448 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8449 // CHECK6:       omp.body.continue:
8450 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8451 // CHECK6:       omp.inner.for.inc:
8452 // CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
8453 // CHECK6-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
8454 // CHECK6-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
8455 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
8456 // CHECK6:       omp.inner.for.end:
8457 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8458 // CHECK6:       omp.loop.exit:
8459 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
8460 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8461 // CHECK6-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
8462 // CHECK6-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8463 // CHECK6:       .omp.final.then:
8464 // CHECK6-NEXT:    store i32 33, i32* [[I]], align 4
8465 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8466 // CHECK6:       .omp.final.done:
8467 // CHECK6-NEXT:    ret void
8468 //
8469 //
8470 // CHECK6-LABEL: define {{[^@]+}}@.omp_task_entry.
8471 // CHECK6-SAME: (i32 noundef signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
8472 // CHECK6-NEXT:  entry:
8473 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
8474 // CHECK6-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
8475 // CHECK6-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
8476 // CHECK6-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
8477 // CHECK6-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
8478 // CHECK6-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
8479 // CHECK6-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
8480 // CHECK6-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
8481 // CHECK6-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
8482 // CHECK6-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
8483 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
8484 // CHECK6-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
8485 // CHECK6-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
8486 // CHECK6-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
8487 // CHECK6-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
8488 // CHECK6-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
8489 // CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
8490 // CHECK6-NEXT:    [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
8491 // CHECK6-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
8492 // CHECK6-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
8493 // CHECK6-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
8494 // CHECK6-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
8495 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25
8496 // CHECK6-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !25
8497 // CHECK6-NEXT:    store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25
8498 // CHECK6-NEXT:    store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25
8499 // CHECK6-NEXT:    store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !25
8500 // CHECK6-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
8501 // CHECK6-NEXT:    [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
8502 // CHECK6-NEXT:    [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
8503 // CHECK6-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
8504 // CHECK6-NEXT:    br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
8505 // CHECK6:       omp_offload.failed.i:
8506 // CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
8507 // CHECK6-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
8508 // CHECK6:       .omp_outlined..1.exit:
8509 // CHECK6-NEXT:    ret i32 0
8510 //
8511 //
8512 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
8513 // CHECK6-SAME: (i64 noundef [[A:%.*]], i64 noundef [[K:%.*]]) #[[ATTR3]] {
8514 // CHECK6-NEXT:  entry:
8515 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
8516 // CHECK6-NEXT:    [[K_ADDR:%.*]] = alloca i64, align 8
8517 // CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
8518 // CHECK6-NEXT:    [[K_CASTED:%.*]] = alloca i64, align 8
8519 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
8520 // CHECK6-NEXT:    store i64 [[K]], i64* [[K_ADDR]], align 8
8521 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
8522 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
8523 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
8524 // CHECK6-NEXT:    store i32 [[TMP0]], i32* [[CONV1]], align 4
8525 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
8526 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[K_ADDR]], align 8
8527 // CHECK6-NEXT:    store i64 [[TMP2]], i64* [[K_CASTED]], align 8
8528 // CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[K_CASTED]], align 8
8529 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
8530 // CHECK6-NEXT:    ret void
8531 //
8532 //
8533 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..2
8534 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[K:%.*]]) #[[ATTR3]] {
8535 // CHECK6-NEXT:  entry:
8536 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8537 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8538 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
8539 // CHECK6-NEXT:    [[K_ADDR:%.*]] = alloca i64, align 8
8540 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8541 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8542 // CHECK6-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
8543 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8544 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8545 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8546 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8547 // CHECK6-NEXT:    [[I:%.*]] = alloca i32, align 4
8548 // CHECK6-NEXT:    [[K1:%.*]] = alloca i64, align 8
8549 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8550 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8551 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
8552 // CHECK6-NEXT:    store i64 [[K]], i64* [[K_ADDR]], align 8
8553 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
8554 // CHECK6-NEXT:    [[TMP0:%.*]] = load i64, i64* [[K_ADDR]], align 8
8555 // CHECK6-NEXT:    store i64 [[TMP0]], i64* [[DOTLINEAR_START]], align 8
8556 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8557 // CHECK6-NEXT:    store i32 8, i32* [[DOTOMP_UB]], align 4
8558 // CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8559 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8560 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8561 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
8562 // CHECK6-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]])
8563 // CHECK6-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1073741859, i32 0, i32 8, i32 1, i32 1)
8564 // CHECK6-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
8565 // CHECK6:       omp.dispatch.cond:
8566 // CHECK6-NEXT:    [[TMP3:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
8567 // CHECK6-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0
8568 // CHECK6-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
8569 // CHECK6:       omp.dispatch.body:
8570 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8571 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
8572 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8573 // CHECK6:       omp.inner.for.cond:
8574 // CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
8575 // CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !26
8576 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
8577 // CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8578 // CHECK6:       omp.inner.for.body:
8579 // CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
8580 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
8581 // CHECK6-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
8582 // CHECK6-NEXT:    store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !26
8583 // CHECK6-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !26
8584 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
8585 // CHECK6-NEXT:    [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3
8586 // CHECK6-NEXT:    [[CONV3:%.*]] = sext i32 [[MUL2]] to i64
8587 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]]
8588 // CHECK6-NEXT:    store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !26
8589 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !26
8590 // CHECK6-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1
8591 // CHECK6-NEXT:    store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group !26
8592 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8593 // CHECK6:       omp.body.continue:
8594 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8595 // CHECK6:       omp.inner.for.inc:
8596 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
8597 // CHECK6-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1
8598 // CHECK6-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
8599 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
8600 // CHECK6:       omp.inner.for.end:
8601 // CHECK6-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
8602 // CHECK6:       omp.dispatch.inc:
8603 // CHECK6-NEXT:    br label [[OMP_DISPATCH_COND]]
8604 // CHECK6:       omp.dispatch.end:
8605 // CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8606 // CHECK6-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
8607 // CHECK6-NEXT:    br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8608 // CHECK6:       .omp.final.then:
8609 // CHECK6-NEXT:    store i32 1, i32* [[I]], align 4
8610 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8611 // CHECK6:       .omp.final.done:
8612 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8613 // CHECK6-NEXT:    [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
8614 // CHECK6-NEXT:    br i1 [[TMP15]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
8615 // CHECK6:       .omp.linear.pu:
8616 // CHECK6-NEXT:    [[TMP16:%.*]] = load i64, i64* [[K1]], align 8
8617 // CHECK6-NEXT:    store i64 [[TMP16]], i64* [[K_ADDR]], align 8
8618 // CHECK6-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
8619 // CHECK6:       .omp.linear.pu.done:
8620 // CHECK6-NEXT:    ret void
8621 //
8622 //
8623 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
8624 // CHECK6-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR2]] {
8625 // CHECK6-NEXT:  entry:
8626 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
8627 // CHECK6-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
8628 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
8629 // CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
8630 // CHECK6-NEXT:    [[LIN_CASTED:%.*]] = alloca i64, align 8
8631 // CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
8632 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
8633 // CHECK6-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
8634 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
8635 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
8636 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
8637 // CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
8638 // CHECK6-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
8639 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
8640 // CHECK6-NEXT:    store i16 [[TMP0]], i16* [[CONV3]], align 2
8641 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
8642 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
8643 // CHECK6-NEXT:    [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
8644 // CHECK6-NEXT:    store i32 [[TMP2]], i32* [[CONV4]], align 4
8645 // CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
8646 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 4
8647 // CHECK6-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
8648 // CHECK6-NEXT:    store i32 [[TMP4]], i32* [[CONV5]], align 4
8649 // CHECK6-NEXT:    [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
8650 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
8651 // CHECK6-NEXT:    ret void
8652 //
8653 //
8654 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..3
8655 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] {
8656 // CHECK6-NEXT:  entry:
8657 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8658 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8659 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
8660 // CHECK6-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
8661 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
8662 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
8663 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i64, align 8
8664 // CHECK6-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
8665 // CHECK6-NEXT:    [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
8666 // CHECK6-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
8667 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
8668 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
8669 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
8670 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8671 // CHECK6-NEXT:    [[IT:%.*]] = alloca i64, align 8
8672 // CHECK6-NEXT:    [[LIN4:%.*]] = alloca i32, align 4
8673 // CHECK6-NEXT:    [[A5:%.*]] = alloca i32, align 4
8674 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8675 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8676 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
8677 // CHECK6-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
8678 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
8679 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
8680 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
8681 // CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
8682 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 4
8683 // CHECK6-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
8684 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 4
8685 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
8686 // CHECK6-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
8687 // CHECK6-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
8688 // CHECK6-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
8689 // CHECK6-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
8690 // CHECK6-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
8691 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8692 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8693 // CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
8694 // CHECK6-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
8695 // CHECK6-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
8696 // CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
8697 // CHECK6-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
8698 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8699 // CHECK6:       cond.true:
8700 // CHECK6-NEXT:    br label [[COND_END:%.*]]
8701 // CHECK6:       cond.false:
8702 // CHECK6-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
8703 // CHECK6-NEXT:    br label [[COND_END]]
8704 // CHECK6:       cond.end:
8705 // CHECK6-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
8706 // CHECK6-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
8707 // CHECK6-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
8708 // CHECK6-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
8709 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8710 // CHECK6:       omp.inner.for.cond:
8711 // CHECK6-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
8712 // CHECK6-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !29
8713 // CHECK6-NEXT:    [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
8714 // CHECK6-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8715 // CHECK6:       omp.inner.for.body:
8716 // CHECK6-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
8717 // CHECK6-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
8718 // CHECK6-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
8719 // CHECK6-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !29
8720 // CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !29
8721 // CHECK6-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
8722 // CHECK6-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
8723 // CHECK6-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29
8724 // CHECK6-NEXT:    [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
8725 // CHECK6-NEXT:    [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
8726 // CHECK6-NEXT:    [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
8727 // CHECK6-NEXT:    store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !29
8728 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !29
8729 // CHECK6-NEXT:    [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
8730 // CHECK6-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
8731 // CHECK6-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !29
8732 // CHECK6-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
8733 // CHECK6-NEXT:    [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
8734 // CHECK6-NEXT:    [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
8735 // CHECK6-NEXT:    store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !29
8736 // CHECK6-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !29
8737 // CHECK6-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
8738 // CHECK6-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
8739 // CHECK6-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
8740 // CHECK6-NEXT:    store i16 [[CONV16]], i16* [[CONV]], align 2, !llvm.access.group !29
8741 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8742 // CHECK6:       omp.body.continue:
8743 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8744 // CHECK6:       omp.inner.for.inc:
8745 // CHECK6-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
8746 // CHECK6-NEXT:    [[ADD17:%.*]] = add i64 [[TMP17]], 1
8747 // CHECK6-NEXT:    store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
8748 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
8749 // CHECK6:       omp.inner.for.end:
8750 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8751 // CHECK6:       omp.loop.exit:
8752 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
8753 // CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8754 // CHECK6-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
8755 // CHECK6-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8756 // CHECK6:       .omp.final.then:
8757 // CHECK6-NEXT:    store i64 400, i64* [[IT]], align 8
8758 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8759 // CHECK6:       .omp.final.done:
8760 // CHECK6-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8761 // CHECK6-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
8762 // CHECK6-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
8763 // CHECK6:       .omp.linear.pu:
8764 // CHECK6-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4
8765 // CHECK6-NEXT:    store i32 [[TMP22]], i32* [[CONV1]], align 4
8766 // CHECK6-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A5]], align 4
8767 // CHECK6-NEXT:    store i32 [[TMP23]], i32* [[CONV2]], align 4
8768 // CHECK6-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
8769 // CHECK6:       .omp.linear.pu.done:
8770 // CHECK6-NEXT:    ret void
8771 //
8772 //
8773 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
8774 // CHECK6-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR2]] {
8775 // CHECK6-NEXT:  entry:
8776 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
8777 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
8778 // CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
8779 // CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
8780 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
8781 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
8782 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
8783 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
8784 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
8785 // CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
8786 // CHECK6-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
8787 // CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
8788 // CHECK6-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
8789 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
8790 // CHECK6-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
8791 // CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
8792 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
8793 // CHECK6-NEXT:    ret void
8794 //
8795 //
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3]] {
// CHECK6-NEXT:  entry:
// CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[TMP:%.*]] = alloca i16, align 2
// CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[IT:%.*]] = alloca i16, align 2
// CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6:       cond.true:
// CHECK6-NEXT:    br label [[COND_END:%.*]]
// CHECK6:       cond.false:
// CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT:    br label [[COND_END]]
// CHECK6:       cond.end:
// CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6:       omp.inner.for.cond:
// CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !32
// CHECK6-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK6-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6:       omp.inner.for.body:
// CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK6-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK6-NEXT:    store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !32
// CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !32
// CHECK6-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK6-NEXT:    store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group !32
// CHECK6-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !32
// CHECK6-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK6-NEXT:    [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK6-NEXT:    [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK6-NEXT:    store i16 [[CONV7]], i16* [[CONV1]], align 2, !llvm.access.group !32
// CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6:       omp.body.continue:
// CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6:       omp.inner.for.inc:
// CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK6-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK6-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
// CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
// CHECK6:       omp.inner.for.end:
// CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK6:       omp.loop.exit:
// CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK6-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK6:       .omp.final.then:
// CHECK6-NEXT:    store i16 22, i16* [[IT]], align 2
// CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK6:       .omp.final.done:
// CHECK6-NEXT:    ret void
//
//
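// (Note: target entry point for the foo() directive at source line 140; it
// reloads the ten captured values, including both VLA extents, and forwards
// them to @.omp_outlined..7 through @__kmpc_fork_call.)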
// CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
// CHECK6-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK6-NEXT:  entry:
// CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK6-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK6-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK6-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK6-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK6-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK6-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK6-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK6-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK6-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK6-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK6-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK6-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK6-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK6-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK6-NEXT:    [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK6-NEXT:    store i32 [[TMP8]], i32* [[CONV6]], align 4
// CHECK6-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 4
// CHECK6-NEXT:    [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK6-NEXT:    store i32 [[TMP10]], i32* [[CONV7]], align 4
// CHECK6-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
// CHECK6-NEXT:    ret void
//
//
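// (Note: outlined body for the chunked loop; @__kmpc_for_static_init_4 is
// called with schedule kind 33 (static chunked) and the chunk size taken
// from the captured expression. The i8 induction [[IT]] counts down from 122
// over 26 iterations, the lastprivate epilogue stores 96, and the body
// updates the arrays, VLAs and struct TT fields.)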
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK6-NEXT:  entry:
// CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK6-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK6-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK6-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK6-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[TMP:%.*]] = alloca i8, align 1
// CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[IT:%.*]] = alloca i8, align 1
// CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK6-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK6-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK6-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK6-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK6-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK6-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK6-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK6-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK6-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK6-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 4
// CHECK6-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK6-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK6-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
// CHECK6:       omp.dispatch.cond:
// CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6:       cond.true:
// CHECK6-NEXT:    br label [[COND_END:%.*]]
// CHECK6:       cond.false:
// CHECK6-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT:    br label [[COND_END]]
// CHECK6:       cond.end:
// CHECK6-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK6-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK6-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK6:       omp.dispatch.body:
// CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6:       omp.inner.for.cond:
// CHECK6-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK6-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
// CHECK6-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK6-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6:       omp.inner.for.body:
// CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK6-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK6-NEXT:    [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
// CHECK6-NEXT:    store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !35
// CHECK6-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !35
// CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK6-NEXT:    store i32 [[ADD]], i32* [[CONV]], align 4, !llvm.access.group !35
// CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK6-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !35
// CHECK6-NEXT:    [[CONV9:%.*]] = fpext float [[TMP20]] to double
// CHECK6-NEXT:    [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK6-NEXT:    [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK6-NEXT:    store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !35
// CHECK6-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK6-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !35
// CHECK6-NEXT:    [[CONV13:%.*]] = fpext float [[TMP21]] to double
// CHECK6-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK6-NEXT:    [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK6-NEXT:    store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !35
// CHECK6-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK6-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK6-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !35
// CHECK6-NEXT:    [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK6-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !35
// CHECK6-NEXT:    [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK6-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
// CHECK6-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK6-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !35
// CHECK6-NEXT:    [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK6-NEXT:    store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !35
// CHECK6-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !35
// CHECK6-NEXT:    [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK6-NEXT:    store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !35
// CHECK6-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK6-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !35
// CHECK6-NEXT:    [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
// CHECK6-NEXT:    [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK6-NEXT:    [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK6-NEXT:    store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !35
// CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6:       omp.body.continue:
// CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6:       omp.inner.for.inc:
// CHECK6-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK6-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK6-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
// CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
// CHECK6:       omp.inner.for.end:
// CHECK6-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
// CHECK6:       omp.dispatch.inc:
// CHECK6-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT:    [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK6-NEXT:    store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT:    [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK6-NEXT:    store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT:    br label [[OMP_DISPATCH_COND]]
// CHECK6:       omp.dispatch.end:
// CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK6-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK6-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK6:       .omp.final.then:
// CHECK6-NEXT:    store i8 96, i8* [[IT]], align 1
// CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK6:       .omp.final.done:
// CHECK6-NEXT:    ret void
//
//
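// (Note: host-only checks for bar(); it calls foo(), S1::r1(), fstatic() and
// ftemplate<int>() and accumulates the results into 'a'.)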
// CHECK6-LABEL: define {{[^@]+}}@_Z3bari
// CHECK6-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK6-NEXT:  entry:
// CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK6-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
// CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK6-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
// CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]])
// CHECK6-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK6-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
// CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
// CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK6-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
// CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
// CHECK6-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK6-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
// CHECK6-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT:    ret i32 [[TMP8]]
//
//
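// (Note: S1::r1(int) allocates a VLA of i16, captures the 'n > 60' condition,
// and offloads through @__tgt_target_teams_mapper with six mapped components
// (the object, 'b', the VLA extents and buffer, and the captured condition);
// both the failure path and the else branch fall back to the host entry.)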
// CHECK6-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK6-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK6-NEXT:  entry:
// CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[B:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK6-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK6-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8
// CHECK6-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8
// CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8
// CHECK6-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 8
// CHECK6-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK6-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK6-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
// CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK6-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK6-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK6-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK6-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK6-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
// CHECK6-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
// CHECK6-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B]], align 4
// CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK6-NEXT:    store i32 [[TMP6]], i32* [[CONV]], align 4
// CHECK6-NEXT:    [[TMP7:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK6-NEXT:    [[TMP8:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK6-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
// CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK6-NEXT:    [[FROMBOOL3:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK6-NEXT:    store i8 [[FROMBOOL3]], i8* [[CONV2]], align 1
// CHECK6-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK6-NEXT:    [[TMP10:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK6-NEXT:    [[TOBOOL4:%.*]] = trunc i8 [[TMP10]] to i1
// CHECK6-NEXT:    br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK6:       omp_if.then:
// CHECK6-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP11:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK6-NEXT:    [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
// CHECK6-NEXT:    [[TMP13:%.*]] = bitcast [6 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
// CHECK6-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP13]], i8* align 8 bitcast ([6 x i64]* @.offload_sizes.11 to i8*), i64 48, i1 false)
// CHECK6-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to %struct.S1**
// CHECK6-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP15]], align 8
// CHECK6-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to double**
// CHECK6-NEXT:    store double* [[A]], double** [[TMP17]], align 8
// CHECK6-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK6-NEXT:    store i8* null, i8** [[TMP18]], align 8
// CHECK6-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK6-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK6-NEXT:    store i64 [[TMP7]], i64* [[TMP20]], align 8
// CHECK6-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK6-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
// CHECK6-NEXT:    store i64 [[TMP7]], i64* [[TMP22]], align 8
// CHECK6-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK6-NEXT:    store i8* null, i8** [[TMP23]], align 8
// CHECK6-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK6-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
// CHECK6-NEXT:    store i64 2, i64* [[TMP25]], align 8
// CHECK6-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK6-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
// CHECK6-NEXT:    store i64 2, i64* [[TMP27]], align 8
// CHECK6-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK6-NEXT:    store i8* null, i8** [[TMP28]], align 8
// CHECK6-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK6-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64*
// CHECK6-NEXT:    store i64 [[TMP2]], i64* [[TMP30]], align 8
// CHECK6-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK6-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK6-NEXT:    store i64 [[TMP2]], i64* [[TMP32]], align 8
// CHECK6-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK6-NEXT:    store i8* null, i8** [[TMP33]], align 8
// CHECK6-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK6-NEXT:    [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
// CHECK6-NEXT:    store i16* [[VLA]], i16** [[TMP35]], align 8
// CHECK6-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK6-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK6-NEXT:    store i16* [[VLA]], i16** [[TMP37]], align 8
// CHECK6-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK6-NEXT:    store i64 [[TMP12]], i64* [[TMP38]], align 8
// CHECK6-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
// CHECK6-NEXT:    store i8* null, i8** [[TMP39]], align 8
// CHECK6-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
// CHECK6-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64*
// CHECK6-NEXT:    store i64 [[TMP9]], i64* [[TMP41]], align 8
// CHECK6-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
// CHECK6-NEXT:    [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64*
// CHECK6-NEXT:    store i64 [[TMP9]], i64* [[TMP43]], align 8
// CHECK6-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
// CHECK6-NEXT:    store i8* null, i8** [[TMP44]], align 8
// CHECK6-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP48:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK6-NEXT:    [[TOBOOL5:%.*]] = trunc i8 [[TMP48]] to i1
// CHECK6-NEXT:    [[TMP49:%.*]] = select i1 [[TOBOOL5]], i32 0, i32 1
// CHECK6-NEXT:    [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP45]], i8** [[TMP46]], i64* [[TMP47]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP49]])
// CHECK6-NEXT:    [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0
// CHECK6-NEXT:    br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK6:       omp_offload.failed:
// CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]]
// CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK6:       omp_offload.cont:
// CHECK6-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK6:       omp_if.else:
// CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]]
// CHECK6-NEXT:    br label [[OMP_IF_END]]
// CHECK6:       omp_if.end:
// CHECK6-NEXT:    [[TMP52:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP52]]
// CHECK6-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK6-NEXT:    [[TMP53:%.*]] = load i16, i16* [[ARRAYIDX6]], align 2
// CHECK6-NEXT:    [[CONV7:%.*]] = sext i16 [[TMP53]] to i32
// CHECK6-NEXT:    [[TMP54:%.*]] = load i32, i32* [[B]], align 4
// CHECK6-NEXT:    [[ADD8:%.*]] = add nsw i32 [[CONV7]], [[TMP54]]
// CHECK6-NEXT:    [[TMP55:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK6-NEXT:    call void @llvm.stackrestore(i8* [[TMP55]])
// CHECK6-NEXT:    ret i32 [[ADD8]]
//
//
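// (Note: fstatic(int) maps four components ('a', 'aa', 'aaa', 'b') and only
// offloads when 'n > 50'; otherwise the target entry runs on the host.)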
// CHECK6-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK6-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK6-NEXT:  entry:
// CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK6-NEXT:    [[AAA:%.*]] = alloca i8, align 1
// CHECK6-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK6-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
// CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK6-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK6-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK6-NEXT:    store i8 0, i8* [[AAA]], align 1
// CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK6-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK6-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK6-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK6-NEXT:    [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK6-NEXT:    store i8 [[TMP4]], i8* [[CONV2]], align 1
// CHECK6-NEXT:    [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK6:       omp_if.then:
// CHECK6-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK6-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
// CHECK6-NEXT:    store i64 [[TMP1]], i64* [[TMP10]], align 8
// CHECK6-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK6-NEXT:    store i8* null, i8** [[TMP11]], align 8
// CHECK6-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK6-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK6-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK6-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK6-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK6-NEXT:    store i64 [[TMP3]], i64* [[TMP15]], align 8
// CHECK6-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK6-NEXT:    store i8* null, i8** [[TMP16]], align 8
// CHECK6-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK6-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK6-NEXT:    store i64 [[TMP5]], i64* [[TMP18]], align 8
// CHECK6-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK6-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK6-NEXT:    store i64 [[TMP5]], i64* [[TMP20]], align 8
// CHECK6-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK6-NEXT:    store i8* null, i8** [[TMP21]], align 8
// CHECK6-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK6-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
// CHECK6-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK6-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
// CHECK6-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK6-NEXT:    store i8* null, i8** [[TMP26]], align 8
// CHECK6-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK6-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK6-NEXT:    br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK6:       omp_offload.failed:
// CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK6:       omp_offload.cont:
// CHECK6-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK6:       omp_if.else:
// CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK6-NEXT:    br label [[OMP_IF_END]]
// CHECK6:       omp_if.end:
// CHECK6-NEXT:    [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT:    ret i32 [[TMP31]]
//
//
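// (Note: ftemplate<int>(int) follows the same pattern with three mapped
// components ('a', 'aa', 'b') guarded by 'n > 40'.)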
// CHECK6-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK6-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK6-NEXT:  entry:
// CHECK6-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK6-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK6-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK6-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK6-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK6-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK6-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK6-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK6-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK6-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK6-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK6-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK6:       omp_if.then:
// CHECK6-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK6-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
// CHECK6-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK6-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK6-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK6-NEXT:    store i8* null, i8** [[TMP9]], align 8
// CHECK6-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK6-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
// CHECK6-NEXT:    store i64 [[TMP3]], i64* [[TMP11]], align 8
// CHECK6-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK6-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK6-NEXT:    store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK6-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK6-NEXT:    store i8* null, i8** [[TMP14]], align 8
// CHECK6-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK6-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
// CHECK6-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK6-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
// CHECK6-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK6-NEXT:    store i8* null, i8** [[TMP19]], align 8
// CHECK6-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK6-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK6-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK6:       omp_offload.failed:
// CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK6-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK6:       omp_offload.cont:
// CHECK6-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK6:       omp_if.else:
// CHECK6-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK6-NEXT:    br label [[OMP_IF_END]]
// CHECK6:       omp_if.end:
// CHECK6-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT:    ret i32 [[TMP24]]
//
//
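// (Note: target entry for S1::r1() at source line 214; depending on the
// captured condition it either forks @.omp_outlined..10 or runs it through
// the __kmpc_serialized_parallel / __kmpc_end_serialized_parallel pair.)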
// CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
// CHECK6-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK6-NEXT:  entry:
// CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK6-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK6-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK6-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK6-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK6-NEXT:    [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK6-NEXT:    [[TMP5:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK6-NEXT:    [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK6-NEXT:    store i32 [[TMP5]], i32* [[CONV4]], align 4
// CHECK6-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK6-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 1
// CHECK6-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK6-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK6-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK6-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV5]], align 1
// CHECK6-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK6-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV3]], align 1
// CHECK6-NEXT:    [[TOBOOL6:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK6-NEXT:    br i1 [[TOBOOL6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK6:       omp_if.then:
// CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]])
// CHECK6-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK6:       omp_if.else:
// CHECK6-NEXT:    call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK6-NEXT:    store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK6-NEXT:    store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK6-NEXT:    call void @.omp_outlined..10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) #[[ATTR4]]
// CHECK6-NEXT:    call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK6-NEXT:    br label [[OMP_IF_END]]
// CHECK6:       omp_if.end:
// CHECK6-NEXT:    ret void
//
//
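// (Note: outlined body for the 64-bit loop; the i64 induction [[IT]] is
// 2000 - 400*iv over 4 iterations with unsigned bounds via
// @__kmpc_for_static_init_8u. The loop is emitted twice: the then-branch
// carries !llvm.access.group and !nontemporal metadata on the 'a' field
// accesses, the else-branch is the plain version; both join at the shared
// omp.loop.exit epilogue, whose lastprivate store writes 400.)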
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK6-NEXT:  entry:
// CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK6-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK6-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK6-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK6-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK6-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK6-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK6-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK6-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT:    [[TMP4:%.*]] = load i8, i8* [[CONV3]], align 1
// CHECK6-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1
// CHECK6-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK6:       omp_if.then:
// CHECK6-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK6-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK6-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3
// CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6:       cond.true:
// CHECK6-NEXT:    br label [[COND_END:%.*]]
// CHECK6:       cond.false:
// CHECK6-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT:    br label [[COND_END]]
// CHECK6:       cond.end:
// CHECK6-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK6-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK6-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8
// CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6:       omp.inner.for.cond:
// CHECK6-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
// CHECK6-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !38
// CHECK6-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]]
// CHECK6-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6:       omp.inner.for.body:
// CHECK6-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
// CHECK6-NEXT:    [[MUL:%.*]] = mul i64 [[TMP12]], 400
// CHECK6-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK6-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !38
// CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !38
// CHECK6-NEXT:    [[CONV5:%.*]] = sitofp i32 [[TMP13]] to double
// CHECK6-NEXT:    [[ADD:%.*]] = fadd double [[CONV5]], 1.500000e+00
// CHECK6-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK6-NEXT:    store double [[ADD]], double* [[A]], align 8, !nontemporal !39, !llvm.access.group !38
// CHECK6-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP14:%.*]] = load double, double* [[A6]], align 8, !nontemporal !39, !llvm.access.group !38
// CHECK6-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
// CHECK6-NEXT:    store double [[INC]], double* [[A6]], align 8, !nontemporal !39, !llvm.access.group !38
// CHECK6-NEXT:    [[CONV7:%.*]] = fptosi double [[INC]] to i16
// CHECK6-NEXT:    [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP15]]
// CHECK6-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK6-NEXT:    store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !38
// CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6:       omp.body.continue:
// CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6:       omp.inner.for.inc:
// CHECK6-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
// CHECK6-NEXT:    [[ADD9:%.*]] = add i64 [[TMP16]], 1
// CHECK6-NEXT:    store i64 [[ADD9]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !38
// CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
// CHECK6:       omp.inner.for.end:
// CHECK6-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK6:       omp_if.else:
// CHECK6-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK6-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK6-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT:    [[CMP10:%.*]] = icmp ugt i64 [[TMP19]], 3
// CHECK6-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
// CHECK6:       cond.true11:
// CHECK6-NEXT:    br label [[COND_END13:%.*]]
// CHECK6:       cond.false12:
// CHECK6-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT:    br label [[COND_END13]]
// CHECK6:       cond.end13:
// CHECK6-NEXT:    [[COND14:%.*]] = phi i64 [ 3, [[COND_TRUE11]] ], [ [[TMP20]], [[COND_FALSE12]] ]
// CHECK6-NEXT:    store i64 [[COND14]], i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK6-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8
// CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND15:%.*]]
// CHECK6:       omp.inner.for.cond15:
// CHECK6-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK6-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK6-NEXT:    [[CMP16:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
// CHECK6-NEXT:    br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY17:%.*]], label [[OMP_INNER_FOR_END31:%.*]]
// CHECK6:       omp.inner.for.body17:
// CHECK6-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK6-NEXT:    [[MUL18:%.*]] = mul i64 [[TMP24]], 400
// CHECK6-NEXT:    [[SUB19:%.*]] = sub i64 2000, [[MUL18]]
// CHECK6-NEXT:    store i64 [[SUB19]], i64* [[IT]], align 8
// CHECK6-NEXT:    [[TMP25:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK6-NEXT:    [[CONV20:%.*]] = sitofp i32 [[TMP25]] to double
// CHECK6-NEXT:    [[ADD21:%.*]] = fadd double [[CONV20]], 1.500000e+00
// CHECK6-NEXT:    [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK6-NEXT:    store double [[ADD21]], double* [[A22]], align 8
// CHECK6-NEXT:    [[A23:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK6-NEXT:    [[TMP26:%.*]] = load double, double* [[A23]], align 8
// CHECK6-NEXT:    [[INC24:%.*]] = fadd double [[TMP26]], 1.000000e+00
// CHECK6-NEXT:    store double [[INC24]], double* [[A23]], align 8
// CHECK6-NEXT:    [[CONV25:%.*]] = fptosi double [[INC24]] to i16
// CHECK6-NEXT:    [[TMP27:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK6-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP27]]
// CHECK6-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i64 1
// CHECK6-NEXT:    store i16 [[CONV25]], i16* [[ARRAYIDX27]], align 2
// CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE28:%.*]]
// CHECK6:       omp.body.continue28:
// CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC29:%.*]]
// CHECK6:       omp.inner.for.inc29:
// CHECK6-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK6-NEXT:    [[ADD30:%.*]] = add i64 [[TMP28]], 1
// CHECK6-NEXT:    store i64 [[ADD30]], i64* [[DOTOMP_IV]], align 8
// CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND15]], !llvm.loop [[LOOP42:![0-9]+]]
// CHECK6:       omp.inner.for.end31:
// CHECK6-NEXT:    br label [[OMP_IF_END]]
// CHECK6:       omp_if.end:
// CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK6:       omp.loop.exit:
// CHECK6-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
// CHECK6-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK6-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK6:       .omp.final.then:
// CHECK6-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK6:       .omp.final.done:
// CHECK6-NEXT:    ret void
//
//
9605 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
9606 // CHECK6-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
9607 // CHECK6-NEXT:  entry:
9608 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
9609 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
9610 // CHECK6-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
9611 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
9612 // CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
9613 // CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
9614 // CHECK6-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
9615 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
9616 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
9617 // CHECK6-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
9618 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
9619 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
9620 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
9621 // CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
9622 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
9623 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
9624 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
9625 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[CONV3]], align 4
9626 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
9627 // CHECK6-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
9628 // CHECK6-NEXT:    [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
9629 // CHECK6-NEXT:    store i16 [[TMP3]], i16* [[CONV4]], align 2
9630 // CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
9631 // CHECK6-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 1
9632 // CHECK6-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
9633 // CHECK6-NEXT:    store i8 [[TMP5]], i8* [[CONV5]], align 1
9634 // CHECK6-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
9635 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
9636 // CHECK6-NEXT:    ret void
9637 //
9638 //
9639 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..13
9640 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
9641 // CHECK6-NEXT:  entry:
9642 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9643 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9644 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
9645 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
9646 // CHECK6-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
9647 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
9648 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9649 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9650 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9651 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9652 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
9653 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
9654 // CHECK6-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
9655 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
9656 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
9657 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
9658 // CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
9659 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
9660 // CHECK6-NEXT:    ret void
9661 //
9662 //
9663 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
9664 // CHECK6-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
9665 // CHECK6-NEXT:  entry:
9666 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
9667 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
9668 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
9669 // CHECK6-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
9670 // CHECK6-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
9671 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
9672 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
9673 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
9674 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
9675 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
9676 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
9677 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
9678 // CHECK6-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
9679 // CHECK6-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
9680 // CHECK6-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
9681 // CHECK6-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
9682 // CHECK6-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
9683 // CHECK6-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
9684 // CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
9685 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
9686 // CHECK6-NEXT:    ret void
9687 //
9688 //
9689 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..16
9690 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
9691 // CHECK6-NEXT:  entry:
9692 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9693 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9694 // CHECK6-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
9695 // CHECK6-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
9696 // CHECK6-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
9697 // CHECK6-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
9698 // CHECK6-NEXT:    [[TMP:%.*]] = alloca i64, align 8
9699 // CHECK6-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
9700 // CHECK6-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
9701 // CHECK6-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
9702 // CHECK6-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9703 // CHECK6-NEXT:    [[I:%.*]] = alloca i64, align 8
9704 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9705 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9706 // CHECK6-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
9707 // CHECK6-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
9708 // CHECK6-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
9709 // CHECK6-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
9710 // CHECK6-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
9711 // CHECK6-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
9712 // CHECK6-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
9713 // CHECK6-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
9714 // CHECK6-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
9715 // CHECK6-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9716 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9717 // CHECK6-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
9718 // CHECK6-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
9719 // CHECK6-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
9720 // CHECK6-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
9721 // CHECK6-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9722 // CHECK6:       cond.true:
9723 // CHECK6-NEXT:    br label [[COND_END:%.*]]
9724 // CHECK6:       cond.false:
9725 // CHECK6-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
9726 // CHECK6-NEXT:    br label [[COND_END]]
9727 // CHECK6:       cond.end:
9728 // CHECK6-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
9729 // CHECK6-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
9730 // CHECK6-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
9731 // CHECK6-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
9732 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9733 // CHECK6:       omp.inner.for.cond:
9734 // CHECK6-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44
9735 // CHECK6-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !44
9736 // CHECK6-NEXT:    [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
9737 // CHECK6-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9738 // CHECK6:       omp.inner.for.body:
9739 // CHECK6-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44
9740 // CHECK6-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
9741 // CHECK6-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
9742 // CHECK6-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !44
9743 // CHECK6-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !44
9744 // CHECK6-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
9745 // CHECK6-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !44
9746 // CHECK6-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !44
9747 // CHECK6-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
9748 // CHECK6-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
9749 // CHECK6-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
9750 // CHECK6-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !44
9751 // CHECK6-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
9752 // CHECK6-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
9753 // CHECK6-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
9754 // CHECK6-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !44
9755 // CHECK6-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9756 // CHECK6:       omp.body.continue:
9757 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9758 // CHECK6:       omp.inner.for.inc:
9759 // CHECK6-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44
9760 // CHECK6-NEXT:    [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
9761 // CHECK6-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !44
9762 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
9763 // CHECK6:       omp.inner.for.end:
9764 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9765 // CHECK6:       omp.loop.exit:
9766 // CHECK6-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
9767 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9768 // CHECK6-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
9769 // CHECK6-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9770 // CHECK6:       .omp.final.then:
9771 // CHECK6-NEXT:    store i64 11, i64* [[I]], align 8
9772 // CHECK6-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9773 // CHECK6:       .omp.final.done:
9774 // CHECK6-NEXT:    ret void
9775 //
9776 //
9777 // CHECK6-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
9778 // CHECK6-SAME: () #[[ATTR8:[0-9]+]] {
9779 // CHECK6-NEXT:  entry:
9780 // CHECK6-NEXT:    call void @__tgt_register_requires(i64 1)
9781 // CHECK6-NEXT:    ret void
9782 //
9783 //
9784 // CHECK7-LABEL: define {{[^@]+}}@_Z7get_valv
9785 // CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
9786 // CHECK7-NEXT:  entry:
9787 // CHECK7-NEXT:    ret i64 0
9788 //
9789 //
9790 // CHECK7-LABEL: define {{[^@]+}}@_Z3fooi
9791 // CHECK7-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
9792 // CHECK7-NEXT:  entry:
9793 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
9794 // CHECK7-NEXT:    [[A:%.*]] = alloca i32, align 4
9795 // CHECK7-NEXT:    [[AA:%.*]] = alloca i16, align 2
9796 // CHECK7-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
9797 // CHECK7-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
9798 // CHECK7-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
9799 // CHECK7-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
9800 // CHECK7-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
9801 // CHECK7-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
9802 // CHECK7-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
9803 // CHECK7-NEXT:    [[K:%.*]] = alloca i64, align 8
9804 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
9805 // CHECK7-NEXT:    [[LIN:%.*]] = alloca i32, align 4
9806 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
9807 // CHECK7-NEXT:    [[LIN_CASTED:%.*]] = alloca i32, align 4
9808 // CHECK7-NEXT:    [[A_CASTED2:%.*]] = alloca i32, align 4
9809 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
9810 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
9811 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
9812 // CHECK7-NEXT:    [[A_CASTED3:%.*]] = alloca i32, align 4
9813 // CHECK7-NEXT:    [[AA_CASTED4:%.*]] = alloca i32, align 4
9814 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x i8*], align 4
9815 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x i8*], align 4
9816 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x i8*], align 4
9817 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9818 // CHECK7-NEXT:    [[A_CASTED11:%.*]] = alloca i32, align 4
9819 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
9820 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4
9821 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4
9822 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4
9823 // CHECK7-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4
9824 // CHECK7-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
9825 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
9826 // CHECK7-NEXT:    store i32 0, i32* [[A]], align 4
9827 // CHECK7-NEXT:    store i16 0, i16* [[AA]], align 2
9828 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
9829 // CHECK7-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
9830 // CHECK7-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
9831 // CHECK7-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
9832 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
9833 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
9834 // CHECK7-NEXT:    [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
9835 // CHECK7-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
9836 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
9837 // CHECK7-NEXT:    [[TMP5:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 20, i32 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
9838 // CHECK7-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates*
9839 // CHECK7-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0
9840 // CHECK7-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP5]])
9841 // CHECK7-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
9842 // CHECK7-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
9843 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A]], align 4
9844 // CHECK7-NEXT:    store i32 [[TMP9]], i32* [[A_CASTED]], align 4
9845 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[A_CASTED]], align 4
9846 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i32 [[TMP10]], i64* [[K]]) #[[ATTR4:[0-9]+]]
9847 // CHECK7-NEXT:    store i32 12, i32* [[LIN]], align 4
9848 // CHECK7-NEXT:    [[TMP11:%.*]] = load i16, i16* [[AA]], align 2
9849 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
9850 // CHECK7-NEXT:    store i16 [[TMP11]], i16* [[CONV]], align 2
9851 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[AA_CASTED]], align 4
9852 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[LIN]], align 4
9853 // CHECK7-NEXT:    store i32 [[TMP13]], i32* [[LIN_CASTED]], align 4
9854 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
9855 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[A]], align 4
9856 // CHECK7-NEXT:    store i32 [[TMP15]], i32* [[A_CASTED2]], align 4
9857 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A_CASTED2]], align 4
9858 // CHECK7-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
9859 // CHECK7-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
9860 // CHECK7-NEXT:    store i32 [[TMP12]], i32* [[TMP18]], align 4
9861 // CHECK7-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
9862 // CHECK7-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
9863 // CHECK7-NEXT:    store i32 [[TMP12]], i32* [[TMP20]], align 4
9864 // CHECK7-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
9865 // CHECK7-NEXT:    store i8* null, i8** [[TMP21]], align 4
9866 // CHECK7-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
9867 // CHECK7-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
9868 // CHECK7-NEXT:    store i32 [[TMP14]], i32* [[TMP23]], align 4
9869 // CHECK7-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
9870 // CHECK7-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
9871 // CHECK7-NEXT:    store i32 [[TMP14]], i32* [[TMP25]], align 4
9872 // CHECK7-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
9873 // CHECK7-NEXT:    store i8* null, i8** [[TMP26]], align 4
9874 // CHECK7-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
9875 // CHECK7-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
9876 // CHECK7-NEXT:    store i32 [[TMP16]], i32* [[TMP28]], align 4
9877 // CHECK7-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
9878 // CHECK7-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
9879 // CHECK7-NEXT:    store i32 [[TMP16]], i32* [[TMP30]], align 4
9880 // CHECK7-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
9881 // CHECK7-NEXT:    store i8* null, i8** [[TMP31]], align 4
9882 // CHECK7-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
9883 // CHECK7-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
9884 // CHECK7-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
9885 // CHECK7-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
9886 // CHECK7-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
9887 // CHECK7:       omp_offload.failed:
9888 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i32 [[TMP12]], i32 [[TMP14]], i32 [[TMP16]]) #[[ATTR4]]
9889 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT]]
9890 // CHECK7:       omp_offload.cont:
9891 // CHECK7-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A]], align 4
9892 // CHECK7-NEXT:    store i32 [[TMP36]], i32* [[A_CASTED3]], align 4
9893 // CHECK7-NEXT:    [[TMP37:%.*]] = load i32, i32* [[A_CASTED3]], align 4
9894 // CHECK7-NEXT:    [[TMP38:%.*]] = load i16, i16* [[AA]], align 2
9895 // CHECK7-NEXT:    [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
9896 // CHECK7-NEXT:    store i16 [[TMP38]], i16* [[CONV5]], align 2
9897 // CHECK7-NEXT:    [[TMP39:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
9898 // CHECK7-NEXT:    [[TMP40:%.*]] = load i32, i32* [[N_ADDR]], align 4
9899 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP40]], 10
9900 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
9901 // CHECK7:       omp_if.then:
9902 // CHECK7-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
9903 // CHECK7-NEXT:    [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i32*
9904 // CHECK7-NEXT:    store i32 [[TMP37]], i32* [[TMP42]], align 4
9905 // CHECK7-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
9906 // CHECK7-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
9907 // CHECK7-NEXT:    store i32 [[TMP37]], i32* [[TMP44]], align 4
9908 // CHECK7-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
9909 // CHECK7-NEXT:    store i8* null, i8** [[TMP45]], align 4
9910 // CHECK7-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1
9911 // CHECK7-NEXT:    [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i32*
9912 // CHECK7-NEXT:    store i32 [[TMP39]], i32* [[TMP47]], align 4
9913 // CHECK7-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 1
9914 // CHECK7-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32*
9915 // CHECK7-NEXT:    store i32 [[TMP39]], i32* [[TMP49]], align 4
9916 // CHECK7-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1
9917 // CHECK7-NEXT:    store i8* null, i8** [[TMP50]], align 4
9918 // CHECK7-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
9919 // CHECK7-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
9920 // CHECK7-NEXT:    [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP51]], i8** [[TMP52]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
9921 // CHECK7-NEXT:    [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0
9922 // CHECK7-NEXT:    br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]]
9923 // CHECK7:       omp_offload.failed9:
9924 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]]
9925 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT10]]
9926 // CHECK7:       omp_offload.cont10:
9927 // CHECK7-NEXT:    br label [[OMP_IF_END:%.*]]
9928 // CHECK7:       omp_if.else:
9929 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]]
9930 // CHECK7-NEXT:    br label [[OMP_IF_END]]
9931 // CHECK7:       omp_if.end:
9932 // CHECK7-NEXT:    [[TMP55:%.*]] = load i32, i32* [[A]], align 4
9933 // CHECK7-NEXT:    store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_]], align 4
9934 // CHECK7-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A]], align 4
9935 // CHECK7-NEXT:    store i32 [[TMP56]], i32* [[A_CASTED11]], align 4
9936 // CHECK7-NEXT:    [[TMP57:%.*]] = load i32, i32* [[A_CASTED11]], align 4
9937 // CHECK7-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9938 // CHECK7-NEXT:    store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
9939 // CHECK7-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
9940 // CHECK7-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N_ADDR]], align 4
9941 // CHECK7-NEXT:    [[CMP12:%.*]] = icmp sgt i32 [[TMP60]], 20
9942 // CHECK7-NEXT:    br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE19:%.*]]
9943 // CHECK7:       omp_if.then13:
9944 // CHECK7-NEXT:    [[TMP61:%.*]] = mul nuw i32 [[TMP1]], 4
9945 // CHECK7-NEXT:    [[TMP62:%.*]] = sext i32 [[TMP61]] to i64
9946 // CHECK7-NEXT:    [[TMP63:%.*]] = mul nuw i32 5, [[TMP3]]
9947 // CHECK7-NEXT:    [[TMP64:%.*]] = mul nuw i32 [[TMP63]], 8
9948 // CHECK7-NEXT:    [[TMP65:%.*]] = sext i32 [[TMP64]] to i64
9949 // CHECK7-NEXT:    [[TMP66:%.*]] = bitcast [10 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
9950 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP66]], i8* align 4 bitcast ([10 x i64]* @.offload_sizes.8 to i8*), i32 80, i1 false)
9951 // CHECK7-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
9952 // CHECK7-NEXT:    [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32*
9953 // CHECK7-NEXT:    store i32 [[TMP57]], i32* [[TMP68]], align 4
9954 // CHECK7-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
9955 // CHECK7-NEXT:    [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32*
9956 // CHECK7-NEXT:    store i32 [[TMP57]], i32* [[TMP70]], align 4
9957 // CHECK7-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0
9958 // CHECK7-NEXT:    store i8* null, i8** [[TMP71]], align 4
9959 // CHECK7-NEXT:    [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1
9960 // CHECK7-NEXT:    [[TMP73:%.*]] = bitcast i8** [[TMP72]] to [10 x float]**
9961 // CHECK7-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP73]], align 4
9962 // CHECK7-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1
9963 // CHECK7-NEXT:    [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]**
9964 // CHECK7-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 4
9965 // CHECK7-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1
9966 // CHECK7-NEXT:    store i8* null, i8** [[TMP76]], align 4
9967 // CHECK7-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2
9968 // CHECK7-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32*
9969 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP78]], align 4
9970 // CHECK7-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2
9971 // CHECK7-NEXT:    [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
9972 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP80]], align 4
9973 // CHECK7-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2
9974 // CHECK7-NEXT:    store i8* null, i8** [[TMP81]], align 4
9975 // CHECK7-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3
9976 // CHECK7-NEXT:    [[TMP83:%.*]] = bitcast i8** [[TMP82]] to float**
9977 // CHECK7-NEXT:    store float* [[VLA]], float** [[TMP83]], align 4
9978 // CHECK7-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3
9979 // CHECK7-NEXT:    [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float**
9980 // CHECK7-NEXT:    store float* [[VLA]], float** [[TMP85]], align 4
9981 // CHECK7-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
9982 // CHECK7-NEXT:    store i64 [[TMP62]], i64* [[TMP86]], align 4
9983 // CHECK7-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3
9984 // CHECK7-NEXT:    store i8* null, i8** [[TMP87]], align 4
9985 // CHECK7-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4
9986 // CHECK7-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to [5 x [10 x double]]**
9987 // CHECK7-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP89]], align 4
9988 // CHECK7-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4
9989 // CHECK7-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]**
9990 // CHECK7-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 4
9991 // CHECK7-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4
9992 // CHECK7-NEXT:    store i8* null, i8** [[TMP92]], align 4
9993 // CHECK7-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5
9994 // CHECK7-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32*
9995 // CHECK7-NEXT:    store i32 5, i32* [[TMP94]], align 4
9996 // CHECK7-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5
9997 // CHECK7-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32*
9998 // CHECK7-NEXT:    store i32 5, i32* [[TMP96]], align 4
9999 // CHECK7-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5
10000 // CHECK7-NEXT:    store i8* null, i8** [[TMP97]], align 4
10001 // CHECK7-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6
10002 // CHECK7-NEXT:    [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32*
10003 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP99]], align 4
10004 // CHECK7-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6
10005 // CHECK7-NEXT:    [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32*
10006 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP101]], align 4
10007 // CHECK7-NEXT:    [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6
10008 // CHECK7-NEXT:    store i8* null, i8** [[TMP102]], align 4
10009 // CHECK7-NEXT:    [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7
10010 // CHECK7-NEXT:    [[TMP104:%.*]] = bitcast i8** [[TMP103]] to double**
10011 // CHECK7-NEXT:    store double* [[VLA1]], double** [[TMP104]], align 4
10012 // CHECK7-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7
10013 // CHECK7-NEXT:    [[TMP106:%.*]] = bitcast i8** [[TMP105]] to double**
10014 // CHECK7-NEXT:    store double* [[VLA1]], double** [[TMP106]], align 4
10015 // CHECK7-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
10016 // CHECK7-NEXT:    store i64 [[TMP65]], i64* [[TMP107]], align 4
10017 // CHECK7-NEXT:    [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7
10018 // CHECK7-NEXT:    store i8* null, i8** [[TMP108]], align 4
10019 // CHECK7-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8
10020 // CHECK7-NEXT:    [[TMP110:%.*]] = bitcast i8** [[TMP109]] to %struct.TT**
10021 // CHECK7-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP110]], align 4
10022 // CHECK7-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8
10023 // CHECK7-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to %struct.TT**
10024 // CHECK7-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP112]], align 4
10025 // CHECK7-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8
10026 // CHECK7-NEXT:    store i8* null, i8** [[TMP113]], align 4
10027 // CHECK7-NEXT:    [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9
10028 // CHECK7-NEXT:    [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32*
10029 // CHECK7-NEXT:    store i32 [[TMP59]], i32* [[TMP115]], align 4
10030 // CHECK7-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9
10031 // CHECK7-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32*
10032 // CHECK7-NEXT:    store i32 [[TMP59]], i32* [[TMP117]], align 4
10033 // CHECK7-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9
10034 // CHECK7-NEXT:    store i8* null, i8** [[TMP118]], align 4
10035 // CHECK7-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
10036 // CHECK7-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
10037 // CHECK7-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
10038 // CHECK7-NEXT:    [[TMP122:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP119]], i8** [[TMP120]], i64* [[TMP121]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
10039 // CHECK7-NEXT:    [[TMP123:%.*]] = icmp ne i32 [[TMP122]], 0
10040 // CHECK7-NEXT:    br i1 [[TMP123]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
10041 // CHECK7:       omp_offload.failed17:
10042 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]]
10043 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
10044 // CHECK7:       omp_offload.cont18:
10045 // CHECK7-NEXT:    br label [[OMP_IF_END20:%.*]]
10046 // CHECK7:       omp_if.else19:
10047 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]]
10048 // CHECK7-NEXT:    br label [[OMP_IF_END20]]
10049 // CHECK7:       omp_if.end20:
10050 // CHECK7-NEXT:    [[TMP124:%.*]] = load i32, i32* [[A]], align 4
10051 // CHECK7-NEXT:    [[TMP125:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
10052 // CHECK7-NEXT:    call void @llvm.stackrestore(i8* [[TMP125]])
10053 // CHECK7-NEXT:    ret i32 [[TMP124]]
10054 //
10055 //
10056 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
10057 // CHECK7-SAME: () #[[ATTR2:[0-9]+]] {
10058 // CHECK7-NEXT:  entry:
10059 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
10060 // CHECK7-NEXT:    ret void
10061 //
10062 //
10063 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined.
10064 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
10065 // CHECK7-NEXT:  entry:
10066 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10067 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10068 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10069 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10070 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10071 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10072 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10073 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10074 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
10075 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10076 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10077 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10078 // CHECK7-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
10079 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10080 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10081 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10082 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
10083 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10084 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10085 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
10086 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10087 // CHECK7:       cond.true:
10088 // CHECK7-NEXT:    br label [[COND_END:%.*]]
10089 // CHECK7:       cond.false:
10090 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10091 // CHECK7-NEXT:    br label [[COND_END]]
10092 // CHECK7:       cond.end:
10093 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
10094 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10095 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10096 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
10097 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10098 // CHECK7:       omp.inner.for.cond:
10099 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
10100 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
10101 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
10102 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10103 // CHECK7:       omp.inner.for.body:
10104 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
10105 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
10106 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
10107 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
10108 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10109 // CHECK7:       omp.body.continue:
10110 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10111 // CHECK7:       omp.inner.for.inc:
10112 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
10113 // CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
10114 // CHECK7-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
10115 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
10116 // CHECK7:       omp.inner.for.end:
10117 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10118 // CHECK7:       omp.loop.exit:
10119 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
10120 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10121 // CHECK7-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
10122 // CHECK7-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10123 // CHECK7:       .omp.final.then:
10124 // CHECK7-NEXT:    store i32 33, i32* [[I]], align 4
10125 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10126 // CHECK7:       .omp.final.done:
10127 // CHECK7-NEXT:    ret void
10128 //
10129 //
10130 // CHECK7-LABEL: define {{[^@]+}}@.omp_task_entry.
10131 // CHECK7-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
10132 // CHECK7-NEXT:  entry:
10133 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
10134 // CHECK7-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
10135 // CHECK7-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
10136 // CHECK7-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
10137 // CHECK7-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
10138 // CHECK7-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
10139 // CHECK7-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
10140 // CHECK7-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
10141 // CHECK7-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
10142 // CHECK7-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
10143 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
10144 // CHECK7-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
10145 // CHECK7-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
10146 // CHECK7-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
10147 // CHECK7-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
10148 // CHECK7-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
10149 // CHECK7-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
10150 // CHECK7-NEXT:    [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
10151 // CHECK7-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
10152 // CHECK7-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
10153 // CHECK7-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
10154 // CHECK7-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]])
10155 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26
10156 // CHECK7-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !26
10157 // CHECK7-NEXT:    store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26
10158 // CHECK7-NEXT:    store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26
10159 // CHECK7-NEXT:    store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !26
10160 // CHECK7-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26
10161 // CHECK7-NEXT:    [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26
10162 // CHECK7-NEXT:    [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
10163 // CHECK7-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
10164 // CHECK7-NEXT:    br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
10165 // CHECK7:       omp_offload.failed.i:
10166 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
10167 // CHECK7-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
10168 // CHECK7:       .omp_outlined..1.exit:
10169 // CHECK7-NEXT:    ret i32 0
10170 //
10171 //
10172 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
10173 // CHECK7-SAME: (i32 noundef [[A:%.*]], i64* noundef nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
10174 // CHECK7-NEXT:  entry:
10175 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
10176 // CHECK7-NEXT:    [[K_ADDR:%.*]] = alloca i64*, align 4
10177 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
10178 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
10179 // CHECK7-NEXT:    store i64* [[K]], i64** [[K_ADDR]], align 4
10180 // CHECK7-NEXT:    [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
10181 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
10182 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
10183 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
10184 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i64*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP2]], i64* [[TMP0]])
10185 // CHECK7-NEXT:    ret void
10186 //
10187 //
10188 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..2
10189 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i64* noundef nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
10190 // CHECK7-NEXT:  entry:
10191 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10192 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10193 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
10194 // CHECK7-NEXT:    [[K_ADDR:%.*]] = alloca i64*, align 4
10195 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10196 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10197 // CHECK7-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
10198 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10199 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10200 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10201 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10202 // CHECK7-NEXT:    [[I:%.*]] = alloca i32, align 4
10203 // CHECK7-NEXT:    [[K1:%.*]] = alloca i64, align 8
10204 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10205 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10206 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
10207 // CHECK7-NEXT:    store i64* [[K]], i64** [[K_ADDR]], align 4
10208 // CHECK7-NEXT:    [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
10209 // CHECK7-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
10210 // CHECK7-NEXT:    store i64 [[TMP1]], i64* [[DOTLINEAR_START]], align 8
10211 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10212 // CHECK7-NEXT:    store i32 8, i32* [[DOTOMP_UB]], align 4
10213 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10214 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10215 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10216 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
10217 // CHECK7-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
10218 // CHECK7-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 1073741859, i32 0, i32 8, i32 1, i32 1)
10219 // CHECK7-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
10220 // CHECK7:       omp.dispatch.cond:
10221 // CHECK7-NEXT:    [[TMP4:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
10222 // CHECK7-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0
10223 // CHECK7-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
10224 // CHECK7:       omp.dispatch.body:
10225 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10226 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
10227 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10228 // CHECK7:       omp.inner.for.cond:
10229 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
10230 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
10231 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
10232 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10233 // CHECK7:       omp.inner.for.body:
10234 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
10235 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
10236 // CHECK7-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
10237 // CHECK7-NEXT:    store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !27
10238 // CHECK7-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !27
10239 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
10240 // CHECK7-NEXT:    [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3
10241 // CHECK7-NEXT:    [[CONV:%.*]] = sext i32 [[MUL2]] to i64
10242 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]]
10243 // CHECK7-NEXT:    store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !27
10244 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !27
10245 // CHECK7-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
10246 // CHECK7-NEXT:    store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !27
10247 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10248 // CHECK7:       omp.body.continue:
10249 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10250 // CHECK7:       omp.inner.for.inc:
10251 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
10252 // CHECK7-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
10253 // CHECK7-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
10254 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
10255 // CHECK7:       omp.inner.for.end:
10256 // CHECK7-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
10257 // CHECK7:       omp.dispatch.inc:
10258 // CHECK7-NEXT:    br label [[OMP_DISPATCH_COND]]
10259 // CHECK7:       omp.dispatch.end:
10260 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10261 // CHECK7-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
10262 // CHECK7-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10263 // CHECK7:       .omp.final.then:
10264 // CHECK7-NEXT:    store i32 1, i32* [[I]], align 4
10265 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10266 // CHECK7:       .omp.final.done:
10267 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10268 // CHECK7-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
10269 // CHECK7-NEXT:    br i1 [[TMP16]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
10270 // CHECK7:       .omp.linear.pu:
10271 // CHECK7-NEXT:    [[TMP17:%.*]] = load i64, i64* [[K1]], align 8
10272 // CHECK7-NEXT:    store i64 [[TMP17]], i64* [[TMP0]], align 8
10273 // CHECK7-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
10274 // CHECK7:       .omp.linear.pu.done:
10275 // CHECK7-NEXT:    ret void
10276 //
10277 //
10278 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
10279 // CHECK7-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR2]] {
10280 // CHECK7-NEXT:  entry:
10281 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
10282 // CHECK7-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
10283 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
10284 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
10285 // CHECK7-NEXT:    [[LIN_CASTED:%.*]] = alloca i32, align 4
10286 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
10287 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
10288 // CHECK7-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
10289 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
10290 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
10291 // CHECK7-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
10292 // CHECK7-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
10293 // CHECK7-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
10294 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
10295 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
10296 // CHECK7-NEXT:    store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
10297 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
10298 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
10299 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[A_CASTED]], align 4
10300 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
10301 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
10302 // CHECK7-NEXT:    ret void
10303 //
10304 //
10305 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..3
10306 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
10307 // CHECK7-NEXT:  entry:
10308 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10309 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10310 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
10311 // CHECK7-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
10312 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
10313 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
10314 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i64, align 4
10315 // CHECK7-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
10316 // CHECK7-NEXT:    [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
10317 // CHECK7-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
10318 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
10319 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
10320 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
10321 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10322 // CHECK7-NEXT:    [[IT:%.*]] = alloca i64, align 8
10323 // CHECK7-NEXT:    [[LIN2:%.*]] = alloca i32, align 4
10324 // CHECK7-NEXT:    [[A3:%.*]] = alloca i32, align 4
10325 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10326 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10327 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
10328 // CHECK7-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
10329 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
10330 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
10331 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
10332 // CHECK7-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
10333 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
10334 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
10335 // CHECK7-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
10336 // CHECK7-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
10337 // CHECK7-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
10338 // CHECK7-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
10339 // CHECK7-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
10340 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10341 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10342 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
10343 // CHECK7-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
10344 // CHECK7-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
10345 // CHECK7-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
10346 // CHECK7-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
10347 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10348 // CHECK7:       cond.true:
10349 // CHECK7-NEXT:    br label [[COND_END:%.*]]
10350 // CHECK7:       cond.false:
10351 // CHECK7-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
10352 // CHECK7-NEXT:    br label [[COND_END]]
10353 // CHECK7:       cond.end:
10354 // CHECK7-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
10355 // CHECK7-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
10356 // CHECK7-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
10357 // CHECK7-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
10358 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10359 // CHECK7:       omp.inner.for.cond:
10360 // CHECK7-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
10361 // CHECK7-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !30
10362 // CHECK7-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
10363 // CHECK7-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10364 // CHECK7:       omp.inner.for.body:
10365 // CHECK7-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
10366 // CHECK7-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
10367 // CHECK7-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
10368 // CHECK7-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !30
10369 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !30
10370 // CHECK7-NEXT:    [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
10371 // CHECK7-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
10372 // CHECK7-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30
10373 // CHECK7-NEXT:    [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
10374 // CHECK7-NEXT:    [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
10375 // CHECK7-NEXT:    [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
10376 // CHECK7-NEXT:    store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !30
10377 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !30
10378 // CHECK7-NEXT:    [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
10379 // CHECK7-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
10380 // CHECK7-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30
10381 // CHECK7-NEXT:    [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
10382 // CHECK7-NEXT:    [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
10383 // CHECK7-NEXT:    [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
10384 // CHECK7-NEXT:    store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !30
10385 // CHECK7-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !30
10386 // CHECK7-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
10387 // CHECK7-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
10388 // CHECK7-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
10389 // CHECK7-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !30
10390 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10391 // CHECK7:       omp.body.continue:
10392 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10393 // CHECK7:       omp.inner.for.inc:
10394 // CHECK7-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
10395 // CHECK7-NEXT:    [[ADD15:%.*]] = add i64 [[TMP17]], 1
10396 // CHECK7-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
10397 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
10398 // CHECK7:       omp.inner.for.end:
10399 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10400 // CHECK7:       omp.loop.exit:
10401 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
10402 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10403 // CHECK7-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
10404 // CHECK7-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10405 // CHECK7:       .omp.final.then:
10406 // CHECK7-NEXT:    store i64 400, i64* [[IT]], align 8
10407 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10408 // CHECK7:       .omp.final.done:
10409 // CHECK7-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10410 // CHECK7-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
10411 // CHECK7-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
10412 // CHECK7:       .omp.linear.pu:
10413 // CHECK7-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4
10414 // CHECK7-NEXT:    store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4
10415 // CHECK7-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A3]], align 4
10416 // CHECK7-NEXT:    store i32 [[TMP23]], i32* [[A_ADDR]], align 4
10417 // CHECK7-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
10418 // CHECK7:       .omp.linear.pu.done:
10419 // CHECK7-NEXT:    ret void
10420 //
10421 //
10422 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
10423 // CHECK7-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR2]] {
10424 // CHECK7-NEXT:  entry:
10425 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
10426 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
10427 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
10428 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
10429 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
10430 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
10431 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
10432 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
10433 // CHECK7-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
10434 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
10435 // CHECK7-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
10436 // CHECK7-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
10437 // CHECK7-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
10438 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
10439 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
10440 // CHECK7-NEXT:    ret void
10441 //
10442 //
10443 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..4
10444 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3]] {
10445 // CHECK7-NEXT:  entry:
10446 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10447 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10448 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
10449 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
10450 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10451 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i16, align 2
10452 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10453 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10454 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10455 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10456 // CHECK7-NEXT:    [[IT:%.*]] = alloca i16, align 2
10457 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10458 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10459 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
10460 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
10461 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
10462 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10463 // CHECK7-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
10464 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10465 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10466 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10467 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
10468 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10469 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10470 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
10471 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10472 // CHECK7:       cond.true:
10473 // CHECK7-NEXT:    br label [[COND_END:%.*]]
10474 // CHECK7:       cond.false:
10475 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10476 // CHECK7-NEXT:    br label [[COND_END]]
10477 // CHECK7:       cond.end:
10478 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
10479 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10480 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10481 // CHECK7-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
10482 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10483 // CHECK7:       omp.inner.for.cond:
10484 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
10485 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
10486 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
10487 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10488 // CHECK7:       omp.inner.for.body:
10489 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
10490 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
10491 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
10492 // CHECK7-NEXT:    [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
10493 // CHECK7-NEXT:    store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !33
10494 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !33
10495 // CHECK7-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
10496 // CHECK7-NEXT:    store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !33
10497 // CHECK7-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !33
10498 // CHECK7-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
10499 // CHECK7-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
10500 // CHECK7-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
10501 // CHECK7-NEXT:    store i16 [[CONV6]], i16* [[CONV]], align 2, !llvm.access.group !33
10502 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10503 // CHECK7:       omp.body.continue:
10504 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10505 // CHECK7:       omp.inner.for.inc:
10506 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
10507 // CHECK7-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
10508 // CHECK7-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
10509 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
10510 // CHECK7:       omp.inner.for.end:
10511 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10512 // CHECK7:       omp.loop.exit:
10513 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
10514 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10515 // CHECK7-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
10516 // CHECK7-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10517 // CHECK7:       .omp.final.then:
10518 // CHECK7-NEXT:    store i16 22, i16* [[IT]], align 2
10519 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10520 // CHECK7:       .omp.final.done:
10521 // CHECK7-NEXT:    ret void
10522 //
10523 //
10524 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
10525 // CHECK7-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
10526 // CHECK7-NEXT:  entry:
10527 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
10528 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
10529 // CHECK7-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
10530 // CHECK7-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
10531 // CHECK7-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
10532 // CHECK7-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
10533 // CHECK7-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
10534 // CHECK7-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
10535 // CHECK7-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
10536 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
10537 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
10538 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
10539 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
10540 // CHECK7-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
10541 // CHECK7-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
10542 // CHECK7-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
10543 // CHECK7-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
10544 // CHECK7-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
10545 // CHECK7-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
10546 // CHECK7-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
10547 // CHECK7-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
10548 // CHECK7-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
10549 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
10550 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
10551 // CHECK7-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
10552 // CHECK7-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
10553 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
10554 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
10555 // CHECK7-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
10556 // CHECK7-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
10557 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
10558 // CHECK7-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
10559 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
10560 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
10561 // CHECK7-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
10562 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
10563 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
10564 // CHECK7-NEXT:    ret void
10565 //
10566 //
10567 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..7
10568 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
10569 // CHECK7-NEXT:  entry:
10570 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10571 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10572 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
10573 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
10574 // CHECK7-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
10575 // CHECK7-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
10576 // CHECK7-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
10577 // CHECK7-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
10578 // CHECK7-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
10579 // CHECK7-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
10580 // CHECK7-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
10581 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
10582 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10583 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i8, align 1
10584 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10585 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10586 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10587 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10588 // CHECK7-NEXT:    [[IT:%.*]] = alloca i8, align 1
10589 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10590 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10591 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
10592 // CHECK7-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
10593 // CHECK7-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
10594 // CHECK7-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
10595 // CHECK7-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
10596 // CHECK7-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
10597 // CHECK7-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
10598 // CHECK7-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
10599 // CHECK7-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
10600 // CHECK7-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
10601 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
10602 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
10603 // CHECK7-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
10604 // CHECK7-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
10605 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
10606 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
10607 // CHECK7-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
10608 // CHECK7-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
10609 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10610 // CHECK7-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
10611 // CHECK7-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10612 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10613 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
10614 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10615 // CHECK7-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
10616 // CHECK7-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
10617 // CHECK7-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
10618 // CHECK7:       omp.dispatch.cond:
10619 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10620 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
10621 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10622 // CHECK7:       cond.true:
10623 // CHECK7-NEXT:    br label [[COND_END:%.*]]
10624 // CHECK7:       cond.false:
10625 // CHECK7-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10626 // CHECK7-NEXT:    br label [[COND_END]]
10627 // CHECK7:       cond.end:
10628 // CHECK7-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
10629 // CHECK7-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10630 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10631 // CHECK7-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
10632 // CHECK7-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
10633 // CHECK7-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10634 // CHECK7-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
10635 // CHECK7-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
10636 // CHECK7:       omp.dispatch.body:
10637 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10638 // CHECK7:       omp.inner.for.cond:
10639 // CHECK7-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
10640 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
10641 // CHECK7-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
10642 // CHECK7-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10643 // CHECK7:       omp.inner.for.body:
10644 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
10645 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
10646 // CHECK7-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
10647 // CHECK7-NEXT:    [[CONV:%.*]] = trunc i32 [[SUB]] to i8
10648 // CHECK7-NEXT:    store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !36
10649 // CHECK7-NEXT:    [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !36
10650 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
10651 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group !36
10652 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
10653 // CHECK7-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !36
10654 // CHECK7-NEXT:    [[CONV7:%.*]] = fpext float [[TMP20]] to double
10655 // CHECK7-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
10656 // CHECK7-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
10657 // CHECK7-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !36
10658 // CHECK7-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
10659 // CHECK7-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
10660 // CHECK7-NEXT:    [[CONV11:%.*]] = fpext float [[TMP21]] to double
10661 // CHECK7-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
10662 // CHECK7-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
10663 // CHECK7-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
10664 // CHECK7-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
10665 // CHECK7-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
10666 // CHECK7-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
10667 // CHECK7-NEXT:    [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
10668 // CHECK7-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
10669 // CHECK7-NEXT:    [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
10670 // CHECK7-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
10671 // CHECK7-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
10672 // CHECK7-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
10673 // CHECK7-NEXT:    [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
10674 // CHECK7-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
10675 // CHECK7-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
10676 // CHECK7-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !36
10677 // CHECK7-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
10678 // CHECK7-NEXT:    store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !36
10679 // CHECK7-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
10680 // CHECK7-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !36
10681 // CHECK7-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
10682 // CHECK7-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
10683 // CHECK7-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
10684 // CHECK7-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !36
10685 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10686 // CHECK7:       omp.body.continue:
10687 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10688 // CHECK7:       omp.inner.for.inc:
10689 // CHECK7-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
10690 // CHECK7-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
10691 // CHECK7-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
10692 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
10693 // CHECK7:       omp.inner.for.end:
10694 // CHECK7-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
10695 // CHECK7:       omp.dispatch.inc:
10696 // CHECK7-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10697 // CHECK7-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10698 // CHECK7-NEXT:    [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
10699 // CHECK7-NEXT:    store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
10700 // CHECK7-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10701 // CHECK7-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
10702 // CHECK7-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
10703 // CHECK7-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
10704 // CHECK7-NEXT:    br label [[OMP_DISPATCH_COND]]
10705 // CHECK7:       omp.dispatch.end:
10706 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
10707 // CHECK7-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10708 // CHECK7-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
10709 // CHECK7-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10710 // CHECK7:       .omp.final.then:
10711 // CHECK7-NEXT:    store i8 96, i8* [[IT]], align 1
10712 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10713 // CHECK7:       .omp.final.done:
10714 // CHECK7-NEXT:    ret void
10715 //
10716 //
10717 // CHECK7-LABEL: define {{[^@]+}}@_Z3bari
10718 // CHECK7-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
10719 // CHECK7-NEXT:  entry:
10720 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
10721 // CHECK7-NEXT:    [[A:%.*]] = alloca i32, align 4
10722 // CHECK7-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
10723 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
10724 // CHECK7-NEXT:    store i32 0, i32* [[A]], align 4
10725 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
10726 // CHECK7-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
10727 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
10728 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
10729 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
10730 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
10731 // CHECK7-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]])
10732 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
10733 // CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
10734 // CHECK7-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
10735 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
10736 // CHECK7-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
10737 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
10738 // CHECK7-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
10739 // CHECK7-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
10740 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
10741 // CHECK7-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
10742 // CHECK7-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
10743 // CHECK7-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
10744 // CHECK7-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
10745 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
10746 // CHECK7-NEXT:    ret i32 [[TMP8]]
10747 //
10748 //
10749 // CHECK7-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
10750 // CHECK7-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
10751 // CHECK7-NEXT:  entry:
10752 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
10753 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
10754 // CHECK7-NEXT:    [[B:%.*]] = alloca i32, align 4
10755 // CHECK7-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
10756 // CHECK7-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
10757 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
10758 // CHECK7-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
10759 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
10760 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 4
10761 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 4
10762 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 4
10763 // CHECK7-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 4
10764 // CHECK7-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
10765 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
10766 // CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
10767 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
10768 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
10769 // CHECK7-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
10770 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
10771 // CHECK7-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
10772 // CHECK7-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
10773 // CHECK7-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
10774 // CHECK7-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
10775 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
10776 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
10777 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60
10778 // CHECK7-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
10779 // CHECK7-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
10780 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B]], align 4
10781 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[B_CASTED]], align 4
10782 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4
10783 // CHECK7-NEXT:    [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
10784 // CHECK7-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
10785 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8*
10786 // CHECK7-NEXT:    [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL]] to i8
10787 // CHECK7-NEXT:    store i8 [[FROMBOOL2]], i8* [[CONV]], align 1
10788 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
10789 // CHECK7-NEXT:    [[TMP9:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
10790 // CHECK7-NEXT:    [[TOBOOL3:%.*]] = trunc i8 [[TMP9]] to i1
10791 // CHECK7-NEXT:    br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
10792 // CHECK7:       omp_if.then:
10793 // CHECK7-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
10794 // CHECK7-NEXT:    [[TMP10:%.*]] = mul nuw i32 2, [[TMP1]]
10795 // CHECK7-NEXT:    [[TMP11:%.*]] = mul nuw i32 [[TMP10]], 2
10796 // CHECK7-NEXT:    [[TMP12:%.*]] = sext i32 [[TMP11]] to i64
10797 // CHECK7-NEXT:    [[TMP13:%.*]] = bitcast [6 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
10798 // CHECK7-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP13]], i8* align 4 bitcast ([6 x i64]* @.offload_sizes.11 to i8*), i32 48, i1 false)
10799 // CHECK7-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10800 // CHECK7-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to %struct.S1**
10801 // CHECK7-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP15]], align 4
10802 // CHECK7-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10803 // CHECK7-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to double**
10804 // CHECK7-NEXT:    store double* [[A]], double** [[TMP17]], align 4
10805 // CHECK7-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
10806 // CHECK7-NEXT:    store i8* null, i8** [[TMP18]], align 4
10807 // CHECK7-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
10808 // CHECK7-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
10809 // CHECK7-NEXT:    store i32 [[TMP6]], i32* [[TMP20]], align 4
10810 // CHECK7-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
10811 // CHECK7-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32*
10812 // CHECK7-NEXT:    store i32 [[TMP6]], i32* [[TMP22]], align 4
10813 // CHECK7-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
10814 // CHECK7-NEXT:    store i8* null, i8** [[TMP23]], align 4
10815 // CHECK7-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
10816 // CHECK7-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
10817 // CHECK7-NEXT:    store i32 2, i32* [[TMP25]], align 4
10818 // CHECK7-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
10819 // CHECK7-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
10820 // CHECK7-NEXT:    store i32 2, i32* [[TMP27]], align 4
10821 // CHECK7-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
10822 // CHECK7-NEXT:    store i8* null, i8** [[TMP28]], align 4
10823 // CHECK7-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
10824 // CHECK7-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
10825 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP30]], align 4
10826 // CHECK7-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
10827 // CHECK7-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32*
10828 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP32]], align 4
10829 // CHECK7-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
10830 // CHECK7-NEXT:    store i8* null, i8** [[TMP33]], align 4
10831 // CHECK7-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
10832 // CHECK7-NEXT:    [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
10833 // CHECK7-NEXT:    store i16* [[VLA]], i16** [[TMP35]], align 4
10834 // CHECK7-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
10835 // CHECK7-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
10836 // CHECK7-NEXT:    store i16* [[VLA]], i16** [[TMP37]], align 4
10837 // CHECK7-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
10838 // CHECK7-NEXT:    store i64 [[TMP12]], i64* [[TMP38]], align 4
10839 // CHECK7-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
10840 // CHECK7-NEXT:    store i8* null, i8** [[TMP39]], align 4
10841 // CHECK7-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
10842 // CHECK7-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
10843 // CHECK7-NEXT:    store i32 [[TMP8]], i32* [[TMP41]], align 4
10844 // CHECK7-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
10845 // CHECK7-NEXT:    [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32*
10846 // CHECK7-NEXT:    store i32 [[TMP8]], i32* [[TMP43]], align 4
10847 // CHECK7-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
10848 // CHECK7-NEXT:    store i8* null, i8** [[TMP44]], align 4
10849 // CHECK7-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10850 // CHECK7-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10851 // CHECK7-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
10852 // CHECK7-NEXT:    [[TMP48:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
10853 // CHECK7-NEXT:    [[TOBOOL4:%.*]] = trunc i8 [[TMP48]] to i1
10854 // CHECK7-NEXT:    [[TMP49:%.*]] = select i1 [[TOBOOL4]], i32 0, i32 1
10855 // CHECK7-NEXT:    [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP45]], i8** [[TMP46]], i64* [[TMP47]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP49]])
10856 // CHECK7-NEXT:    [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0
10857 // CHECK7-NEXT:    br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
10858 // CHECK7:       omp_offload.failed:
10859 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]]
10860 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT]]
10861 // CHECK7:       omp_offload.cont:
10862 // CHECK7-NEXT:    br label [[OMP_IF_END:%.*]]
10863 // CHECK7:       omp_if.else:
10864 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]]
10865 // CHECK7-NEXT:    br label [[OMP_IF_END]]
10866 // CHECK7:       omp_if.end:
10867 // CHECK7-NEXT:    [[TMP52:%.*]] = mul nsw i32 1, [[TMP1]]
10868 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP52]]
10869 // CHECK7-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
10870 // CHECK7-NEXT:    [[TMP53:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2
10871 // CHECK7-NEXT:    [[CONV6:%.*]] = sext i16 [[TMP53]] to i32
10872 // CHECK7-NEXT:    [[TMP54:%.*]] = load i32, i32* [[B]], align 4
10873 // CHECK7-NEXT:    [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP54]]
10874 // CHECK7-NEXT:    [[TMP55:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
10875 // CHECK7-NEXT:    call void @llvm.stackrestore(i8* [[TMP55]])
10876 // CHECK7-NEXT:    ret i32 [[ADD7]]
10877 //
10878 //
10879 // CHECK7-LABEL: define {{[^@]+}}@_ZL7fstatici
10880 // CHECK7-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
10881 // CHECK7-NEXT:  entry:
10882 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
10883 // CHECK7-NEXT:    [[A:%.*]] = alloca i32, align 4
10884 // CHECK7-NEXT:    [[AA:%.*]] = alloca i16, align 2
10885 // CHECK7-NEXT:    [[AAA:%.*]] = alloca i8, align 1
10886 // CHECK7-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
10887 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
10888 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
10889 // CHECK7-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
10890 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
10891 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
10892 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
10893 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
10894 // CHECK7-NEXT:    store i32 0, i32* [[A]], align 4
10895 // CHECK7-NEXT:    store i16 0, i16* [[AA]], align 2
10896 // CHECK7-NEXT:    store i8 0, i8* [[AAA]], align 1
10897 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
10898 // CHECK7-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
10899 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
10900 // CHECK7-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
10901 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
10902 // CHECK7-NEXT:    store i16 [[TMP2]], i16* [[CONV]], align 2
10903 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
10904 // CHECK7-NEXT:    [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
10905 // CHECK7-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
10906 // CHECK7-NEXT:    store i8 [[TMP4]], i8* [[CONV1]], align 1
10907 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
10908 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
10909 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
10910 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
10911 // CHECK7:       omp_if.then:
10912 // CHECK7-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10913 // CHECK7-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
10914 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
10915 // CHECK7-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10916 // CHECK7-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
10917 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP10]], align 4
10918 // CHECK7-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
10919 // CHECK7-NEXT:    store i8* null, i8** [[TMP11]], align 4
10920 // CHECK7-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
10921 // CHECK7-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
10922 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
10923 // CHECK7-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
10924 // CHECK7-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
10925 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP15]], align 4
10926 // CHECK7-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
10927 // CHECK7-NEXT:    store i8* null, i8** [[TMP16]], align 4
10928 // CHECK7-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
10929 // CHECK7-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
10930 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[TMP18]], align 4
10931 // CHECK7-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
10932 // CHECK7-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
10933 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[TMP20]], align 4
10934 // CHECK7-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
10935 // CHECK7-NEXT:    store i8* null, i8** [[TMP21]], align 4
10936 // CHECK7-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
10937 // CHECK7-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
10938 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4
10939 // CHECK7-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
10940 // CHECK7-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
10941 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4
10942 // CHECK7-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
10943 // CHECK7-NEXT:    store i8* null, i8** [[TMP26]], align 4
10944 // CHECK7-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10945 // CHECK7-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10946 // CHECK7-NEXT:    [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
10947 // CHECK7-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
10948 // CHECK7-NEXT:    br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
10949 // CHECK7:       omp_offload.failed:
10950 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
10951 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT]]
10952 // CHECK7:       omp_offload.cont:
10953 // CHECK7-NEXT:    br label [[OMP_IF_END:%.*]]
10954 // CHECK7:       omp_if.else:
10955 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
10956 // CHECK7-NEXT:    br label [[OMP_IF_END]]
10957 // CHECK7:       omp_if.end:
10958 // CHECK7-NEXT:    [[TMP31:%.*]] = load i32, i32* [[A]], align 4
10959 // CHECK7-NEXT:    ret i32 [[TMP31]]
10960 //
10961 //
10962 // CHECK7-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
10963 // CHECK7-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
10964 // CHECK7-NEXT:  entry:
10965 // CHECK7-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
10966 // CHECK7-NEXT:    [[A:%.*]] = alloca i32, align 4
10967 // CHECK7-NEXT:    [[AA:%.*]] = alloca i16, align 2
10968 // CHECK7-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
10969 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
10970 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
10971 // CHECK7-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
10972 // CHECK7-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
10973 // CHECK7-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
10974 // CHECK7-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
10975 // CHECK7-NEXT:    store i32 0, i32* [[A]], align 4
10976 // CHECK7-NEXT:    store i16 0, i16* [[AA]], align 2
10977 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
10978 // CHECK7-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
10979 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
10980 // CHECK7-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
10981 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
10982 // CHECK7-NEXT:    store i16 [[TMP2]], i16* [[CONV]], align 2
10983 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
10984 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
10985 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
10986 // CHECK7-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
10987 // CHECK7:       omp_if.then:
10988 // CHECK7-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10989 // CHECK7-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
10990 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
10991 // CHECK7-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10992 // CHECK7-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
10993 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
10994 // CHECK7-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
10995 // CHECK7-NEXT:    store i8* null, i8** [[TMP9]], align 4
10996 // CHECK7-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
10997 // CHECK7-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
10998 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP11]], align 4
10999 // CHECK7-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
11000 // CHECK7-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
11001 // CHECK7-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
11002 // CHECK7-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
11003 // CHECK7-NEXT:    store i8* null, i8** [[TMP14]], align 4
11004 // CHECK7-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
11005 // CHECK7-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
11006 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
11007 // CHECK7-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
11008 // CHECK7-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
11009 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
11010 // CHECK7-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
11011 // CHECK7-NEXT:    store i8* null, i8** [[TMP19]], align 4
11012 // CHECK7-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
11013 // CHECK7-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
11014 // CHECK7-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
11015 // CHECK7-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
11016 // CHECK7-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
11017 // CHECK7:       omp_offload.failed:
11018 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
11019 // CHECK7-NEXT:    br label [[OMP_OFFLOAD_CONT]]
11020 // CHECK7:       omp_offload.cont:
11021 // CHECK7-NEXT:    br label [[OMP_IF_END:%.*]]
11022 // CHECK7:       omp_if.else:
11023 // CHECK7-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
11024 // CHECK7-NEXT:    br label [[OMP_IF_END]]
11025 // CHECK7:       omp_if.end:
11026 // CHECK7-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
11027 // CHECK7-NEXT:    ret i32 [[TMP24]]
11028 //
11029 //
11030 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
11031 // CHECK7-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
11032 // CHECK7-NEXT:  entry:
11033 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
11034 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
11035 // CHECK7-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
11036 // CHECK7-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
11037 // CHECK7-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
11038 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
11039 // CHECK7-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
11040 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
11041 // CHECK7-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
11042 // CHECK7-NEXT:    [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
11043 // CHECK7-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
11044 // CHECK7-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
11045 // CHECK7-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
11046 // CHECK7-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
11047 // CHECK7-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
11048 // CHECK7-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
11049 // CHECK7-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
11050 // CHECK7-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
11051 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
11052 // CHECK7-NEXT:    [[TMP3:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
11053 // CHECK7-NEXT:    [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 4
11054 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
11055 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_ADDR]], align 4
11056 // CHECK7-NEXT:    store i32 [[TMP5]], i32* [[B_CASTED]], align 4
11057 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4
11058 // CHECK7-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV]], align 1
11059 // CHECK7-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
11060 // CHECK7-NEXT:    [[CONV3:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8*
11061 // CHECK7-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
11062 // CHECK7-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV3]], align 1
11063 // CHECK7-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
11064 // CHECK7-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV]], align 1
11065 // CHECK7-NEXT:    [[TOBOOL4:%.*]] = trunc i8 [[TMP9]] to i1
11066 // CHECK7-NEXT:    br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
11067 // CHECK7:       omp_if.then:
11068 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]])
11069 // CHECK7-NEXT:    br label [[OMP_IF_END:%.*]]
11070 // CHECK7:       omp_if.else:
11071 // CHECK7-NEXT:    call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
11072 // CHECK7-NEXT:    store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
11073 // CHECK7-NEXT:    store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
11074 // CHECK7-NEXT:    call void @.omp_outlined..10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) #[[ATTR4]]
11075 // CHECK7-NEXT:    call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
11076 // CHECK7-NEXT:    br label [[OMP_IF_END]]
11077 // CHECK7:       omp_if.end:
11078 // CHECK7-NEXT:    ret void
11079 //
11080 //
11081 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..10
11082 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
11083 // CHECK7-NEXT:  entry:
11084 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11085 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11086 // CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
11087 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
11088 // CHECK7-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
11089 // CHECK7-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
11090 // CHECK7-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
11091 // CHECK7-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
11092 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
11093 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i64, align 4
11094 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
11095 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
11096 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
11097 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11098 // CHECK7-NEXT:    [[IT:%.*]] = alloca i64, align 8
11099 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11100 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11101 // CHECK7-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
11102 // CHECK7-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
11103 // CHECK7-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
11104 // CHECK7-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
11105 // CHECK7-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
11106 // CHECK7-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
11107 // CHECK7-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
11108 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
11109 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
11110 // CHECK7-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
11111 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
11112 // CHECK7-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
11113 // CHECK7-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
11114 // CHECK7-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
11115 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11116 // CHECK7-NEXT:    [[TMP4:%.*]] = load i8, i8* [[CONV]], align 1
11117 // CHECK7-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1
11118 // CHECK7-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
11119 // CHECK7:       omp_if.then:
11120 // CHECK7-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11121 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
11122 // CHECK7-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
11123 // CHECK7-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
11124 // CHECK7-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3
11125 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11126 // CHECK7:       cond.true:
11127 // CHECK7-NEXT:    br label [[COND_END:%.*]]
11128 // CHECK7:       cond.false:
11129 // CHECK7-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
11130 // CHECK7-NEXT:    br label [[COND_END]]
11131 // CHECK7:       cond.end:
11132 // CHECK7-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
11133 // CHECK7-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
11134 // CHECK7-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
11135 // CHECK7-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8
11136 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11137 // CHECK7:       omp.inner.for.cond:
11138 // CHECK7-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
11139 // CHECK7-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !39
11140 // CHECK7-NEXT:    [[CMP3:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]]
11141 // CHECK7-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11142 // CHECK7:       omp.inner.for.body:
11143 // CHECK7-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
11144 // CHECK7-NEXT:    [[MUL:%.*]] = mul i64 [[TMP12]], 400
11145 // CHECK7-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
11146 // CHECK7-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !39
11147 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !39
11148 // CHECK7-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP13]] to double
11149 // CHECK7-NEXT:    [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
11150 // CHECK7-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
11151 // CHECK7-NEXT:    store double [[ADD]], double* [[A]], align 4, !nontemporal !40, !llvm.access.group !39
11152 // CHECK7-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
11153 // CHECK7-NEXT:    [[TMP14:%.*]] = load double, double* [[A5]], align 4, !nontemporal !40, !llvm.access.group !39
11154 // CHECK7-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
11155 // CHECK7-NEXT:    store double [[INC]], double* [[A5]], align 4, !nontemporal !40, !llvm.access.group !39
11156 // CHECK7-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
11157 // CHECK7-NEXT:    [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]]
11158 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP15]]
11159 // CHECK7-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
11160 // CHECK7-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !39
11161 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11162 // CHECK7:       omp.body.continue:
11163 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11164 // CHECK7:       omp.inner.for.inc:
11165 // CHECK7-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
11166 // CHECK7-NEXT:    [[ADD8:%.*]] = add i64 [[TMP16]], 1
11167 // CHECK7-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
11168 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
11169 // CHECK7:       omp.inner.for.end:
11170 // CHECK7-NEXT:    br label [[OMP_IF_END:%.*]]
11171 // CHECK7:       omp_if.else:
11172 // CHECK7-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11173 // CHECK7-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
11174 // CHECK7-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
11175 // CHECK7-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
11176 // CHECK7-NEXT:    [[CMP9:%.*]] = icmp ugt i64 [[TMP19]], 3
11177 // CHECK7-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
11178 // CHECK7:       cond.true10:
11179 // CHECK7-NEXT:    br label [[COND_END12:%.*]]
11180 // CHECK7:       cond.false11:
11181 // CHECK7-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
11182 // CHECK7-NEXT:    br label [[COND_END12]]
11183 // CHECK7:       cond.end12:
11184 // CHECK7-NEXT:    [[COND13:%.*]] = phi i64 [ 3, [[COND_TRUE10]] ], [ [[TMP20]], [[COND_FALSE11]] ]
11185 // CHECK7-NEXT:    store i64 [[COND13]], i64* [[DOTOMP_UB]], align 8
11186 // CHECK7-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
11187 // CHECK7-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8
11188 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND14:%.*]]
11189 // CHECK7:       omp.inner.for.cond14:
11190 // CHECK7-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
11191 // CHECK7-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
11192 // CHECK7-NEXT:    [[CMP15:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
11193 // CHECK7-NEXT:    br i1 [[CMP15]], label [[OMP_INNER_FOR_BODY16:%.*]], label [[OMP_INNER_FOR_END30:%.*]]
11194 // CHECK7:       omp.inner.for.body16:
11195 // CHECK7-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
11196 // CHECK7-NEXT:    [[MUL17:%.*]] = mul i64 [[TMP24]], 400
11197 // CHECK7-NEXT:    [[SUB18:%.*]] = sub i64 2000, [[MUL17]]
11198 // CHECK7-NEXT:    store i64 [[SUB18]], i64* [[IT]], align 8
11199 // CHECK7-NEXT:    [[TMP25:%.*]] = load i32, i32* [[B_ADDR]], align 4
11200 // CHECK7-NEXT:    [[CONV19:%.*]] = sitofp i32 [[TMP25]] to double
11201 // CHECK7-NEXT:    [[ADD20:%.*]] = fadd double [[CONV19]], 1.500000e+00
11202 // CHECK7-NEXT:    [[A21:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
11203 // CHECK7-NEXT:    store double [[ADD20]], double* [[A21]], align 4
11204 // CHECK7-NEXT:    [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
11205 // CHECK7-NEXT:    [[TMP26:%.*]] = load double, double* [[A22]], align 4
11206 // CHECK7-NEXT:    [[INC23:%.*]] = fadd double [[TMP26]], 1.000000e+00
11207 // CHECK7-NEXT:    store double [[INC23]], double* [[A22]], align 4
11208 // CHECK7-NEXT:    [[CONV24:%.*]] = fptosi double [[INC23]] to i16
11209 // CHECK7-NEXT:    [[TMP27:%.*]] = mul nsw i32 1, [[TMP2]]
11210 // CHECK7-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP27]]
11211 // CHECK7-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1
11212 // CHECK7-NEXT:    store i16 [[CONV24]], i16* [[ARRAYIDX26]], align 2
11213 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE27:%.*]]
11214 // CHECK7:       omp.body.continue27:
11215 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC28:%.*]]
11216 // CHECK7:       omp.inner.for.inc28:
11217 // CHECK7-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
11218 // CHECK7-NEXT:    [[ADD29:%.*]] = add i64 [[TMP28]], 1
11219 // CHECK7-NEXT:    store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8
11220 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND14]], !llvm.loop [[LOOP43:![0-9]+]]
11221 // CHECK7:       omp.inner.for.end30:
11222 // CHECK7-NEXT:    br label [[OMP_IF_END]]
11223 // CHECK7:       omp_if.end:
11224 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11225 // CHECK7:       omp.loop.exit:
11226 // CHECK7-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11227 // CHECK7-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
11228 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
11229 // CHECK7-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11230 // CHECK7-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
11231 // CHECK7-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11232 // CHECK7:       .omp.final.then:
11233 // CHECK7-NEXT:    store i64 400, i64* [[IT]], align 8
11234 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11235 // CHECK7:       .omp.final.done:
11236 // CHECK7-NEXT:    ret void
11237 //
11238 //
11239 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
11240 // CHECK7-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
11241 // CHECK7-NEXT:  entry:
11242 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
11243 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
11244 // CHECK7-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
11245 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
11246 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
11247 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
11248 // CHECK7-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
11249 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
11250 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
11251 // CHECK7-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
11252 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
11253 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
11254 // CHECK7-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
11255 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
11256 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
11257 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
11258 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
11259 // CHECK7-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
11260 // CHECK7-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
11261 // CHECK7-NEXT:    store i16 [[TMP3]], i16* [[CONV2]], align 2
11262 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
11263 // CHECK7-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 1
11264 // CHECK7-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
11265 // CHECK7-NEXT:    store i8 [[TMP5]], i8* [[CONV3]], align 1
11266 // CHECK7-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
11267 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
11268 // CHECK7-NEXT:    ret void
11269 //
11270 //
11271 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..13
11272 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
11273 // CHECK7-NEXT:  entry:
11274 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11275 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11276 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
11277 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
11278 // CHECK7-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
11279 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
11280 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11281 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11282 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11283 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11284 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
11285 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
11286 // CHECK7-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
11287 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
11288 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
11289 // CHECK7-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
11290 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
11291 // CHECK7-NEXT:    ret void
11292 //
11293 //
11294 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
11295 // CHECK7-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
11296 // CHECK7-NEXT:  entry:
11297 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
11298 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
11299 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
11300 // CHECK7-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
11301 // CHECK7-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
11302 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
11303 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
11304 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
11305 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
11306 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
11307 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
11308 // CHECK7-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
11309 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
11310 // CHECK7-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
11311 // CHECK7-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
11312 // CHECK7-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
11313 // CHECK7-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
11314 // CHECK7-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
11315 // CHECK7-NEXT:    ret void
11316 //
11317 //
11318 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..16
11319 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
11320 // CHECK7-NEXT:  entry:
11321 // CHECK7-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11322 // CHECK7-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11323 // CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
11324 // CHECK7-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
11325 // CHECK7-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
11326 // CHECK7-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
11327 // CHECK7-NEXT:    [[TMP:%.*]] = alloca i64, align 4
11328 // CHECK7-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
11329 // CHECK7-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
11330 // CHECK7-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
11331 // CHECK7-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11332 // CHECK7-NEXT:    [[I:%.*]] = alloca i64, align 8
11333 // CHECK7-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11334 // CHECK7-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11335 // CHECK7-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
11336 // CHECK7-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
11337 // CHECK7-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
11338 // CHECK7-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
11339 // CHECK7-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
11340 // CHECK7-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
11341 // CHECK7-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
11342 // CHECK7-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
11343 // CHECK7-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11344 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11345 // CHECK7-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
11346 // CHECK7-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
11347 // CHECK7-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
11348 // CHECK7-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
11349 // CHECK7-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11350 // CHECK7:       cond.true:
11351 // CHECK7-NEXT:    br label [[COND_END:%.*]]
11352 // CHECK7:       cond.false:
11353 // CHECK7-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
11354 // CHECK7-NEXT:    br label [[COND_END]]
11355 // CHECK7:       cond.end:
11356 // CHECK7-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
11357 // CHECK7-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
11358 // CHECK7-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
11359 // CHECK7-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
11360 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11361 // CHECK7:       omp.inner.for.cond:
11362 // CHECK7-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45
11363 // CHECK7-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !45
11364 // CHECK7-NEXT:    [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
11365 // CHECK7-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11366 // CHECK7:       omp.inner.for.body:
11367 // CHECK7-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45
11368 // CHECK7-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
11369 // CHECK7-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
11370 // CHECK7-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !45
11371 // CHECK7-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !45
11372 // CHECK7-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
11373 // CHECK7-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !45
11374 // CHECK7-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !45
11375 // CHECK7-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
11376 // CHECK7-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
11377 // CHECK7-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
11378 // CHECK7-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !45
11379 // CHECK7-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
11380 // CHECK7-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !45
11381 // CHECK7-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
11382 // CHECK7-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !45
11383 // CHECK7-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11384 // CHECK7:       omp.body.continue:
11385 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11386 // CHECK7:       omp.inner.for.inc:
11387 // CHECK7-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45
11388 // CHECK7-NEXT:    [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
11389 // CHECK7-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45
11390 // CHECK7-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
11391 // CHECK7:       omp.inner.for.end:
11392 // CHECK7-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11393 // CHECK7:       omp.loop.exit:
11394 // CHECK7-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
11395 // CHECK7-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11396 // CHECK7-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
11397 // CHECK7-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11398 // CHECK7:       .omp.final.then:
11399 // CHECK7-NEXT:    store i64 11, i64* [[I]], align 8
11400 // CHECK7-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11401 // CHECK7:       .omp.final.done:
11402 // CHECK7-NEXT:    ret void
11403 //
11404 //
11405 // CHECK7-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
11406 // CHECK7-SAME: () #[[ATTR8:[0-9]+]] {
11407 // CHECK7-NEXT:  entry:
11408 // CHECK7-NEXT:    call void @__tgt_register_requires(i64 1)
11409 // CHECK7-NEXT:    ret void
11410 //
11411 //
11412 // CHECK8-LABEL: define {{[^@]+}}@_Z7get_valv
11413 // CHECK8-SAME: () #[[ATTR0:[0-9]+]] {
11414 // CHECK8-NEXT:  entry:
11415 // CHECK8-NEXT:    ret i64 0
11416 //
11417 //
11418 // CHECK8-LABEL: define {{[^@]+}}@_Z3fooi
11419 // CHECK8-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
11420 // CHECK8-NEXT:  entry:
11421 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
11422 // CHECK8-NEXT:    [[A:%.*]] = alloca i32, align 4
11423 // CHECK8-NEXT:    [[AA:%.*]] = alloca i16, align 2
11424 // CHECK8-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
11425 // CHECK8-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
11426 // CHECK8-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
11427 // CHECK8-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
11428 // CHECK8-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
11429 // CHECK8-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
11430 // CHECK8-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
11431 // CHECK8-NEXT:    [[K:%.*]] = alloca i64, align 8
11432 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
11433 // CHECK8-NEXT:    [[LIN:%.*]] = alloca i32, align 4
11434 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
11435 // CHECK8-NEXT:    [[LIN_CASTED:%.*]] = alloca i32, align 4
11436 // CHECK8-NEXT:    [[A_CASTED2:%.*]] = alloca i32, align 4
11437 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
11438 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
11439 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
11440 // CHECK8-NEXT:    [[A_CASTED3:%.*]] = alloca i32, align 4
11441 // CHECK8-NEXT:    [[AA_CASTED4:%.*]] = alloca i32, align 4
11442 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x i8*], align 4
11443 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x i8*], align 4
11444 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x i8*], align 4
11445 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11446 // CHECK8-NEXT:    [[A_CASTED11:%.*]] = alloca i32, align 4
11447 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
11448 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4
11449 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4
11450 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4
11451 // CHECK8-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4
11452 // CHECK8-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
11453 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
11454 // CHECK8-NEXT:    store i32 0, i32* [[A]], align 4
11455 // CHECK8-NEXT:    store i16 0, i16* [[AA]], align 2
11456 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
11457 // CHECK8-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
11458 // CHECK8-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
11459 // CHECK8-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
11460 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
11461 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
11462 // CHECK8-NEXT:    [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
11463 // CHECK8-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
11464 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
11465 // CHECK8-NEXT:    [[TMP5:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 20, i32 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
11466 // CHECK8-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates*
11467 // CHECK8-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0
11468 // CHECK8-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP5]])
11469 // CHECK8-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
11470 // CHECK8-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
11471 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A]], align 4
11472 // CHECK8-NEXT:    store i32 [[TMP9]], i32* [[A_CASTED]], align 4
11473 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[A_CASTED]], align 4
11474 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i32 [[TMP10]], i64* [[K]]) #[[ATTR4:[0-9]+]]
11475 // CHECK8-NEXT:    store i32 12, i32* [[LIN]], align 4
11476 // CHECK8-NEXT:    [[TMP11:%.*]] = load i16, i16* [[AA]], align 2
11477 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
11478 // CHECK8-NEXT:    store i16 [[TMP11]], i16* [[CONV]], align 2
11479 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[AA_CASTED]], align 4
11480 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[LIN]], align 4
11481 // CHECK8-NEXT:    store i32 [[TMP13]], i32* [[LIN_CASTED]], align 4
11482 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
11483 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[A]], align 4
11484 // CHECK8-NEXT:    store i32 [[TMP15]], i32* [[A_CASTED2]], align 4
11485 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A_CASTED2]], align 4
11486 // CHECK8-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
11487 // CHECK8-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
11488 // CHECK8-NEXT:    store i32 [[TMP12]], i32* [[TMP18]], align 4
11489 // CHECK8-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
11490 // CHECK8-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
11491 // CHECK8-NEXT:    store i32 [[TMP12]], i32* [[TMP20]], align 4
11492 // CHECK8-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
11493 // CHECK8-NEXT:    store i8* null, i8** [[TMP21]], align 4
11494 // CHECK8-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
11495 // CHECK8-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
11496 // CHECK8-NEXT:    store i32 [[TMP14]], i32* [[TMP23]], align 4
11497 // CHECK8-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
11498 // CHECK8-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
11499 // CHECK8-NEXT:    store i32 [[TMP14]], i32* [[TMP25]], align 4
11500 // CHECK8-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
11501 // CHECK8-NEXT:    store i8* null, i8** [[TMP26]], align 4
11502 // CHECK8-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
11503 // CHECK8-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
11504 // CHECK8-NEXT:    store i32 [[TMP16]], i32* [[TMP28]], align 4
11505 // CHECK8-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
11506 // CHECK8-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
11507 // CHECK8-NEXT:    store i32 [[TMP16]], i32* [[TMP30]], align 4
11508 // CHECK8-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
11509 // CHECK8-NEXT:    store i8* null, i8** [[TMP31]], align 4
11510 // CHECK8-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
11511 // CHECK8-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
11512 // CHECK8-NEXT:    [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, i32 3, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
11513 // CHECK8-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
11514 // CHECK8-NEXT:    br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
11515 // CHECK8:       omp_offload.failed:
11516 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i32 [[TMP12]], i32 [[TMP14]], i32 [[TMP16]]) #[[ATTR4]]
11517 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT]]
11518 // CHECK8:       omp_offload.cont:
11519 // CHECK8-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A]], align 4
11520 // CHECK8-NEXT:    store i32 [[TMP36]], i32* [[A_CASTED3]], align 4
11521 // CHECK8-NEXT:    [[TMP37:%.*]] = load i32, i32* [[A_CASTED3]], align 4
11522 // CHECK8-NEXT:    [[TMP38:%.*]] = load i16, i16* [[AA]], align 2
11523 // CHECK8-NEXT:    [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
11524 // CHECK8-NEXT:    store i16 [[TMP38]], i16* [[CONV5]], align 2
11525 // CHECK8-NEXT:    [[TMP39:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
11526 // CHECK8-NEXT:    [[TMP40:%.*]] = load i32, i32* [[N_ADDR]], align 4
11527 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP40]], 10
11528 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
11529 // CHECK8:       omp_if.then:
11530 // CHECK8-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
11531 // CHECK8-NEXT:    [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i32*
11532 // CHECK8-NEXT:    store i32 [[TMP37]], i32* [[TMP42]], align 4
11533 // CHECK8-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
11534 // CHECK8-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
11535 // CHECK8-NEXT:    store i32 [[TMP37]], i32* [[TMP44]], align 4
11536 // CHECK8-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
11537 // CHECK8-NEXT:    store i8* null, i8** [[TMP45]], align 4
11538 // CHECK8-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1
11539 // CHECK8-NEXT:    [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i32*
11540 // CHECK8-NEXT:    store i32 [[TMP39]], i32* [[TMP47]], align 4
11541 // CHECK8-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 1
11542 // CHECK8-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32*
11543 // CHECK8-NEXT:    store i32 [[TMP39]], i32* [[TMP49]], align 4
11544 // CHECK8-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1
11545 // CHECK8-NEXT:    store i8* null, i8** [[TMP50]], align 4
11546 // CHECK8-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
11547 // CHECK8-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
11548 // CHECK8-NEXT:    [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, i32 2, i8** [[TMP51]], i8** [[TMP52]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
11549 // CHECK8-NEXT:    [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0
11550 // CHECK8-NEXT:    br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]]
11551 // CHECK8:       omp_offload.failed9:
11552 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]]
11553 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT10]]
11554 // CHECK8:       omp_offload.cont10:
11555 // CHECK8-NEXT:    br label [[OMP_IF_END:%.*]]
11556 // CHECK8:       omp_if.else:
11557 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP37]], i32 [[TMP39]]) #[[ATTR4]]
11558 // CHECK8-NEXT:    br label [[OMP_IF_END]]
11559 // CHECK8:       omp_if.end:
11560 // CHECK8-NEXT:    [[TMP55:%.*]] = load i32, i32* [[A]], align 4
11561 // CHECK8-NEXT:    store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_]], align 4
11562 // CHECK8-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A]], align 4
11563 // CHECK8-NEXT:    store i32 [[TMP56]], i32* [[A_CASTED11]], align 4
11564 // CHECK8-NEXT:    [[TMP57:%.*]] = load i32, i32* [[A_CASTED11]], align 4
11565 // CHECK8-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11566 // CHECK8-NEXT:    store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
11567 // CHECK8-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
11568 // CHECK8-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N_ADDR]], align 4
11569 // CHECK8-NEXT:    [[CMP12:%.*]] = icmp sgt i32 [[TMP60]], 20
11570 // CHECK8-NEXT:    br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE19:%.*]]
11571 // CHECK8:       omp_if.then13:
11572 // CHECK8-NEXT:    [[TMP61:%.*]] = mul nuw i32 [[TMP1]], 4
11573 // CHECK8-NEXT:    [[TMP62:%.*]] = sext i32 [[TMP61]] to i64
11574 // CHECK8-NEXT:    [[TMP63:%.*]] = mul nuw i32 5, [[TMP3]]
11575 // CHECK8-NEXT:    [[TMP64:%.*]] = mul nuw i32 [[TMP63]], 8
11576 // CHECK8-NEXT:    [[TMP65:%.*]] = sext i32 [[TMP64]] to i64
11577 // CHECK8-NEXT:    [[TMP66:%.*]] = bitcast [10 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
11578 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP66]], i8* align 4 bitcast ([10 x i64]* @.offload_sizes.8 to i8*), i32 80, i1 false)
11579 // CHECK8-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
11580 // CHECK8-NEXT:    [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32*
11581 // CHECK8-NEXT:    store i32 [[TMP57]], i32* [[TMP68]], align 4
11582 // CHECK8-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
11583 // CHECK8-NEXT:    [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32*
11584 // CHECK8-NEXT:    store i32 [[TMP57]], i32* [[TMP70]], align 4
11585 // CHECK8-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0
11586 // CHECK8-NEXT:    store i8* null, i8** [[TMP71]], align 4
11587 // CHECK8-NEXT:    [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1
11588 // CHECK8-NEXT:    [[TMP73:%.*]] = bitcast i8** [[TMP72]] to [10 x float]**
11589 // CHECK8-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP73]], align 4
11590 // CHECK8-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1
11591 // CHECK8-NEXT:    [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]**
11592 // CHECK8-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 4
11593 // CHECK8-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1
11594 // CHECK8-NEXT:    store i8* null, i8** [[TMP76]], align 4
11595 // CHECK8-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2
11596 // CHECK8-NEXT:    [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32*
11597 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP78]], align 4
11598 // CHECK8-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2
11599 // CHECK8-NEXT:    [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
11600 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP80]], align 4
11601 // CHECK8-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2
11602 // CHECK8-NEXT:    store i8* null, i8** [[TMP81]], align 4
11603 // CHECK8-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3
11604 // CHECK8-NEXT:    [[TMP83:%.*]] = bitcast i8** [[TMP82]] to float**
11605 // CHECK8-NEXT:    store float* [[VLA]], float** [[TMP83]], align 4
11606 // CHECK8-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3
11607 // CHECK8-NEXT:    [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float**
11608 // CHECK8-NEXT:    store float* [[VLA]], float** [[TMP85]], align 4
11609 // CHECK8-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
11610 // CHECK8-NEXT:    store i64 [[TMP62]], i64* [[TMP86]], align 4
11611 // CHECK8-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3
11612 // CHECK8-NEXT:    store i8* null, i8** [[TMP87]], align 4
11613 // CHECK8-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4
11614 // CHECK8-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to [5 x [10 x double]]**
11615 // CHECK8-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP89]], align 4
11616 // CHECK8-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4
11617 // CHECK8-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]**
11618 // CHECK8-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 4
11619 // CHECK8-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4
11620 // CHECK8-NEXT:    store i8* null, i8** [[TMP92]], align 4
11621 // CHECK8-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5
11622 // CHECK8-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32*
11623 // CHECK8-NEXT:    store i32 5, i32* [[TMP94]], align 4
11624 // CHECK8-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5
11625 // CHECK8-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32*
11626 // CHECK8-NEXT:    store i32 5, i32* [[TMP96]], align 4
11627 // CHECK8-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5
11628 // CHECK8-NEXT:    store i8* null, i8** [[TMP97]], align 4
11629 // CHECK8-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6
11630 // CHECK8-NEXT:    [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32*
11631 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP99]], align 4
11632 // CHECK8-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6
11633 // CHECK8-NEXT:    [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32*
11634 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP101]], align 4
11635 // CHECK8-NEXT:    [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6
11636 // CHECK8-NEXT:    store i8* null, i8** [[TMP102]], align 4
11637 // CHECK8-NEXT:    [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7
11638 // CHECK8-NEXT:    [[TMP104:%.*]] = bitcast i8** [[TMP103]] to double**
11639 // CHECK8-NEXT:    store double* [[VLA1]], double** [[TMP104]], align 4
11640 // CHECK8-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7
11641 // CHECK8-NEXT:    [[TMP106:%.*]] = bitcast i8** [[TMP105]] to double**
11642 // CHECK8-NEXT:    store double* [[VLA1]], double** [[TMP106]], align 4
11643 // CHECK8-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
11644 // CHECK8-NEXT:    store i64 [[TMP65]], i64* [[TMP107]], align 4
11645 // CHECK8-NEXT:    [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7
11646 // CHECK8-NEXT:    store i8* null, i8** [[TMP108]], align 4
11647 // CHECK8-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8
11648 // CHECK8-NEXT:    [[TMP110:%.*]] = bitcast i8** [[TMP109]] to %struct.TT**
11649 // CHECK8-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP110]], align 4
11650 // CHECK8-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8
11651 // CHECK8-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to %struct.TT**
11652 // CHECK8-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP112]], align 4
11653 // CHECK8-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8
11654 // CHECK8-NEXT:    store i8* null, i8** [[TMP113]], align 4
11655 // CHECK8-NEXT:    [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9
11656 // CHECK8-NEXT:    [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32*
11657 // CHECK8-NEXT:    store i32 [[TMP59]], i32* [[TMP115]], align 4
11658 // CHECK8-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9
11659 // CHECK8-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32*
11660 // CHECK8-NEXT:    store i32 [[TMP59]], i32* [[TMP117]], align 4
11661 // CHECK8-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9
11662 // CHECK8-NEXT:    store i8* null, i8** [[TMP118]], align 4
11663 // CHECK8-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
11664 // CHECK8-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
11665 // CHECK8-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
11666 // CHECK8-NEXT:    [[TMP122:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP119]], i8** [[TMP120]], i64* [[TMP121]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
11667 // CHECK8-NEXT:    [[TMP123:%.*]] = icmp ne i32 [[TMP122]], 0
11668 // CHECK8-NEXT:    br i1 [[TMP123]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
11669 // CHECK8:       omp_offload.failed17:
11670 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]]
11671 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
11672 // CHECK8:       omp_offload.cont18:
11673 // CHECK8-NEXT:    br label [[OMP_IF_END20:%.*]]
11674 // CHECK8:       omp_if.else19:
11675 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]]
11676 // CHECK8-NEXT:    br label [[OMP_IF_END20]]
11677 // CHECK8:       omp_if.end20:
11678 // CHECK8-NEXT:    [[TMP124:%.*]] = load i32, i32* [[A]], align 4
11679 // CHECK8-NEXT:    [[TMP125:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
11680 // CHECK8-NEXT:    call void @llvm.stackrestore(i8* [[TMP125]])
11681 // CHECK8-NEXT:    ret i32 [[TMP124]]
11682 //
11683 //
11684 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
11685 // CHECK8-SAME: () #[[ATTR2:[0-9]+]] {
11686 // CHECK8-NEXT:  entry:
11687 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
11688 // CHECK8-NEXT:    ret void
11689 //
11690 //
11691 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined.
11692 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
11693 // CHECK8-NEXT:  entry:
11694 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11695 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11696 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11697 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11698 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11699 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11700 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11701 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11702 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
11703 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11704 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11705 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11706 // CHECK8-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
11707 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11708 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11709 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11710 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
11711 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11712 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11713 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
11714 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11715 // CHECK8:       cond.true:
11716 // CHECK8-NEXT:    br label [[COND_END:%.*]]
11717 // CHECK8:       cond.false:
11718 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11719 // CHECK8-NEXT:    br label [[COND_END]]
11720 // CHECK8:       cond.end:
11721 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
11722 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11723 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11724 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
11725 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11726 // CHECK8:       omp.inner.for.cond:
11727 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
11728 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
11729 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
11730 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11731 // CHECK8:       omp.inner.for.body:
11732 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
11733 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
11734 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
11735 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
11736 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11737 // CHECK8:       omp.body.continue:
11738 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11739 // CHECK8:       omp.inner.for.inc:
11740 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
11741 // CHECK8-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
11742 // CHECK8-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
11743 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
11744 // CHECK8:       omp.inner.for.end:
11745 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11746 // CHECK8:       omp.loop.exit:
11747 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
11748 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11749 // CHECK8-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
11750 // CHECK8-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11751 // CHECK8:       .omp.final.then:
11752 // CHECK8-NEXT:    store i32 33, i32* [[I]], align 4
11753 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11754 // CHECK8:       .omp.final.done:
11755 // CHECK8-NEXT:    ret void
11756 //
11757 //
11758 // CHECK8-LABEL: define {{[^@]+}}@.omp_task_entry.
11759 // CHECK8-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
11760 // CHECK8-NEXT:  entry:
11761 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
11762 // CHECK8-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
11763 // CHECK8-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
11764 // CHECK8-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
11765 // CHECK8-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
11766 // CHECK8-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
11767 // CHECK8-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
11768 // CHECK8-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
11769 // CHECK8-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
11770 // CHECK8-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
11771 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
11772 // CHECK8-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
11773 // CHECK8-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
11774 // CHECK8-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
11775 // CHECK8-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
11776 // CHECK8-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
11777 // CHECK8-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
11778 // CHECK8-NEXT:    [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
11779 // CHECK8-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
11780 // CHECK8-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
11781 // CHECK8-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
11782 // CHECK8-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]])
11783 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26
11784 // CHECK8-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !26
11785 // CHECK8-NEXT:    store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26
11786 // CHECK8-NEXT:    store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26
11787 // CHECK8-NEXT:    store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !26
11788 // CHECK8-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26
11789 // CHECK8-NEXT:    [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26
11790 // CHECK8-NEXT:    [[TMP11:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
11791 // CHECK8-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
11792 // CHECK8-NEXT:    br i1 [[TMP12]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
11793 // CHECK8:       omp_offload.failed.i:
11794 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
11795 // CHECK8-NEXT:    br label [[DOTOMP_OUTLINED__1_EXIT]]
11796 // CHECK8:       .omp_outlined..1.exit:
11797 // CHECK8-NEXT:    ret i32 0
11798 //
11799 //
11800 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
11801 // CHECK8-SAME: (i32 noundef [[A:%.*]], i64* noundef nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
11802 // CHECK8-NEXT:  entry:
11803 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
11804 // CHECK8-NEXT:    [[K_ADDR:%.*]] = alloca i64*, align 4
11805 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
11806 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
11807 // CHECK8-NEXT:    store i64* [[K]], i64** [[K_ADDR]], align 4
11808 // CHECK8-NEXT:    [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
11809 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
11810 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
11811 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
11812 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i64*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP2]], i64* [[TMP0]])
11813 // CHECK8-NEXT:    ret void
11814 //
11815 //
11816 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..2
11817 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i64* noundef nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
11818 // CHECK8-NEXT:  entry:
11819 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11820 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11821 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
11822 // CHECK8-NEXT:    [[K_ADDR:%.*]] = alloca i64*, align 4
11823 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11824 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11825 // CHECK8-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
11826 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11827 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11828 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11829 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11830 // CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
11831 // CHECK8-NEXT:    [[K1:%.*]] = alloca i64, align 8
11832 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11833 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11834 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
11835 // CHECK8-NEXT:    store i64* [[K]], i64** [[K_ADDR]], align 4
11836 // CHECK8-NEXT:    [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
11837 // CHECK8-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
11838 // CHECK8-NEXT:    store i64 [[TMP1]], i64* [[DOTLINEAR_START]], align 8
11839 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11840 // CHECK8-NEXT:    store i32 8, i32* [[DOTOMP_UB]], align 4
11841 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11842 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11843 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11844 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
11845 // CHECK8-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
11846 // CHECK8-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 1073741859, i32 0, i32 8, i32 1, i32 1)
11847 // CHECK8-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
11848 // CHECK8:       omp.dispatch.cond:
11849 // CHECK8-NEXT:    [[TMP4:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
11850 // CHECK8-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0
11851 // CHECK8-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
11852 // CHECK8:       omp.dispatch.body:
11853 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11854 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
11855 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11856 // CHECK8:       omp.inner.for.cond:
11857 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
11858 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27
11859 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
11860 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11861 // CHECK8:       omp.inner.for.body:
11862 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
11863 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
11864 // CHECK8-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
11865 // CHECK8-NEXT:    store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !27
11866 // CHECK8-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !27
11867 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
11868 // CHECK8-NEXT:    [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3
11869 // CHECK8-NEXT:    [[CONV:%.*]] = sext i32 [[MUL2]] to i64
11870 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]]
11871 // CHECK8-NEXT:    store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !27
11872 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !27
11873 // CHECK8-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
11874 // CHECK8-NEXT:    store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !27
11875 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11876 // CHECK8:       omp.body.continue:
11877 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11878 // CHECK8:       omp.inner.for.inc:
11879 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
11880 // CHECK8-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
11881 // CHECK8-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
11882 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
11883 // CHECK8:       omp.inner.for.end:
11884 // CHECK8-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
11885 // CHECK8:       omp.dispatch.inc:
11886 // CHECK8-NEXT:    br label [[OMP_DISPATCH_COND]]
11887 // CHECK8:       omp.dispatch.end:
11888 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11889 // CHECK8-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
11890 // CHECK8-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11891 // CHECK8:       .omp.final.then:
11892 // CHECK8-NEXT:    store i32 1, i32* [[I]], align 4
11893 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11894 // CHECK8:       .omp.final.done:
11895 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11896 // CHECK8-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
11897 // CHECK8-NEXT:    br i1 [[TMP16]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
11898 // CHECK8:       .omp.linear.pu:
11899 // CHECK8-NEXT:    [[TMP17:%.*]] = load i64, i64* [[K1]], align 8
11900 // CHECK8-NEXT:    store i64 [[TMP17]], i64* [[TMP0]], align 8
11901 // CHECK8-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
11902 // CHECK8:       .omp.linear.pu.done:
11903 // CHECK8-NEXT:    ret void
11904 //
11905 //
11906 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
11907 // CHECK8-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR2]] {
11908 // CHECK8-NEXT:  entry:
11909 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
11910 // CHECK8-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
11911 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
11912 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
11913 // CHECK8-NEXT:    [[LIN_CASTED:%.*]] = alloca i32, align 4
11914 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
11915 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
11916 // CHECK8-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
11917 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
11918 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
11919 // CHECK8-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
11920 // CHECK8-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
11921 // CHECK8-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
11922 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
11923 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
11924 // CHECK8-NEXT:    store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
11925 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
11926 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
11927 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[A_CASTED]], align 4
11928 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
11929 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
11930 // CHECK8-NEXT:    ret void
11931 //
11932 //
11933 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..3
11934 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] {
11935 // CHECK8-NEXT:  entry:
11936 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11937 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11938 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
11939 // CHECK8-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
11940 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
11941 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
11942 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i64, align 4
11943 // CHECK8-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
11944 // CHECK8-NEXT:    [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
11945 // CHECK8-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
11946 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
11947 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
11948 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
11949 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11950 // CHECK8-NEXT:    [[IT:%.*]] = alloca i64, align 8
11951 // CHECK8-NEXT:    [[LIN2:%.*]] = alloca i32, align 4
11952 // CHECK8-NEXT:    [[A3:%.*]] = alloca i32, align 4
11953 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11954 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11955 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
11956 // CHECK8-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
11957 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
11958 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
11959 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
11960 // CHECK8-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
11961 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
11962 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
11963 // CHECK8-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
11964 // CHECK8-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
11965 // CHECK8-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
11966 // CHECK8-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
11967 // CHECK8-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
11968 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11969 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11970 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
11971 // CHECK8-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
11972 // CHECK8-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
11973 // CHECK8-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
11974 // CHECK8-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
11975 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11976 // CHECK8:       cond.true:
11977 // CHECK8-NEXT:    br label [[COND_END:%.*]]
11978 // CHECK8:       cond.false:
11979 // CHECK8-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
11980 // CHECK8-NEXT:    br label [[COND_END]]
11981 // CHECK8:       cond.end:
11982 // CHECK8-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
11983 // CHECK8-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
11984 // CHECK8-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
11985 // CHECK8-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
11986 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11987 // CHECK8:       omp.inner.for.cond:
11988 // CHECK8-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
11989 // CHECK8-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !30
11990 // CHECK8-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
11991 // CHECK8-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11992 // CHECK8:       omp.inner.for.body:
11993 // CHECK8-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
11994 // CHECK8-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
11995 // CHECK8-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
11996 // CHECK8-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !30
11997 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !30
11998 // CHECK8-NEXT:    [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
11999 // CHECK8-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
12000 // CHECK8-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30
12001 // CHECK8-NEXT:    [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
12002 // CHECK8-NEXT:    [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
12003 // CHECK8-NEXT:    [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
12004 // CHECK8-NEXT:    store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !30
12005 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !30
12006 // CHECK8-NEXT:    [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
12007 // CHECK8-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
12008 // CHECK8-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !30
12009 // CHECK8-NEXT:    [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
12010 // CHECK8-NEXT:    [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
12011 // CHECK8-NEXT:    [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
12012 // CHECK8-NEXT:    store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !30
12013 // CHECK8-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !30
12014 // CHECK8-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
12015 // CHECK8-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
12016 // CHECK8-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
12017 // CHECK8-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !30
12018 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12019 // CHECK8:       omp.body.continue:
12020 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12021 // CHECK8:       omp.inner.for.inc:
12022 // CHECK8-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
12023 // CHECK8-NEXT:    [[ADD15:%.*]] = add i64 [[TMP17]], 1
12024 // CHECK8-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
12025 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
12026 // CHECK8:       omp.inner.for.end:
12027 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12028 // CHECK8:       omp.loop.exit:
12029 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
12030 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12031 // CHECK8-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
12032 // CHECK8-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12033 // CHECK8:       .omp.final.then:
12034 // CHECK8-NEXT:    store i64 400, i64* [[IT]], align 8
12035 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12036 // CHECK8:       .omp.final.done:
12037 // CHECK8-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12038 // CHECK8-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
12039 // CHECK8-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
12040 // CHECK8:       .omp.linear.pu:
12041 // CHECK8-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4
12042 // CHECK8-NEXT:    store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4
12043 // CHECK8-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A3]], align 4
12044 // CHECK8-NEXT:    store i32 [[TMP23]], i32* [[A_ADDR]], align 4
12045 // CHECK8-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
12046 // CHECK8:       .omp.linear.pu.done:
12047 // CHECK8-NEXT:    ret void
12048 //
12049 //
12050 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
12051 // CHECK8-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR2]] {
12052 // CHECK8-NEXT:  entry:
12053 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
12054 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
12055 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
12056 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
12057 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
12058 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
12059 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
12060 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
12061 // CHECK8-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
12062 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
12063 // CHECK8-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
12064 // CHECK8-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
12065 // CHECK8-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
12066 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
12067 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
12068 // CHECK8-NEXT:    ret void
12069 //
12070 //
12071 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..4
12072 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3]] {
12073 // CHECK8-NEXT:  entry:
12074 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12075 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12076 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
12077 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
12078 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12079 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i16, align 2
12080 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12081 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12082 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12083 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12084 // CHECK8-NEXT:    [[IT:%.*]] = alloca i16, align 2
12085 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12086 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12087 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
12088 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
12089 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
12090 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12091 // CHECK8-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
12092 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12093 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12094 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12095 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
12096 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12097 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12098 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
12099 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12100 // CHECK8:       cond.true:
12101 // CHECK8-NEXT:    br label [[COND_END:%.*]]
12102 // CHECK8:       cond.false:
12103 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12104 // CHECK8-NEXT:    br label [[COND_END]]
12105 // CHECK8:       cond.end:
12106 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
12107 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12108 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12109 // CHECK8-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
12110 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12111 // CHECK8:       omp.inner.for.cond:
12112 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
12113 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33
12114 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
12115 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12116 // CHECK8:       omp.inner.for.body:
12117 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
12118 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
12119 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
12120 // CHECK8-NEXT:    [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
12121 // CHECK8-NEXT:    store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !33
12122 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !33
12123 // CHECK8-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
12124 // CHECK8-NEXT:    store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !33
12125 // CHECK8-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !33
12126 // CHECK8-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
12127 // CHECK8-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
12128 // CHECK8-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
12129 // CHECK8-NEXT:    store i16 [[CONV6]], i16* [[CONV]], align 2, !llvm.access.group !33
12130 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12131 // CHECK8:       omp.body.continue:
12132 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12133 // CHECK8:       omp.inner.for.inc:
12134 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
12135 // CHECK8-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
12136 // CHECK8-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
12137 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
12138 // CHECK8:       omp.inner.for.end:
12139 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12140 // CHECK8:       omp.loop.exit:
12141 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
12142 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12143 // CHECK8-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
12144 // CHECK8-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12145 // CHECK8:       .omp.final.then:
12146 // CHECK8-NEXT:    store i16 22, i16* [[IT]], align 2
12147 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12148 // CHECK8:       .omp.final.done:
12149 // CHECK8-NEXT:    ret void
12150 //
12151 //
12152 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
12153 // CHECK8-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
12154 // CHECK8-NEXT:  entry:
12155 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
12156 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
12157 // CHECK8-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
12158 // CHECK8-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
12159 // CHECK8-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
12160 // CHECK8-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
12161 // CHECK8-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
12162 // CHECK8-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
12163 // CHECK8-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
12164 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
12165 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
12166 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
12167 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
12168 // CHECK8-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
12169 // CHECK8-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
12170 // CHECK8-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
12171 // CHECK8-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
12172 // CHECK8-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
12173 // CHECK8-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
12174 // CHECK8-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
12175 // CHECK8-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
12176 // CHECK8-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
12177 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
12178 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
12179 // CHECK8-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
12180 // CHECK8-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
12181 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
12182 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
12183 // CHECK8-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
12184 // CHECK8-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
12185 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
12186 // CHECK8-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
12187 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
12188 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
12189 // CHECK8-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
12190 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
12191 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
12192 // CHECK8-NEXT:    ret void
12193 //
12194 //
12195 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..7
12196 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
12197 // CHECK8-NEXT:  entry:
12198 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12199 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12200 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
12201 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
12202 // CHECK8-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
12203 // CHECK8-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
12204 // CHECK8-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
12205 // CHECK8-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
12206 // CHECK8-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
12207 // CHECK8-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
12208 // CHECK8-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
12209 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
12210 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12211 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i8, align 1
12212 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12213 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12214 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12215 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12216 // CHECK8-NEXT:    [[IT:%.*]] = alloca i8, align 1
12217 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12218 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12219 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
12220 // CHECK8-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
12221 // CHECK8-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
12222 // CHECK8-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
12223 // CHECK8-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
12224 // CHECK8-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
12225 // CHECK8-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
12226 // CHECK8-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
12227 // CHECK8-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
12228 // CHECK8-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
12229 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
12230 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
12231 // CHECK8-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
12232 // CHECK8-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
12233 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
12234 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
12235 // CHECK8-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
12236 // CHECK8-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
12237 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12238 // CHECK8-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
12239 // CHECK8-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12240 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12241 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
12242 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12243 // CHECK8-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
12244 // CHECK8-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
12245 // CHECK8-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
12246 // CHECK8:       omp.dispatch.cond:
12247 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12248 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
12249 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12250 // CHECK8:       cond.true:
12251 // CHECK8-NEXT:    br label [[COND_END:%.*]]
12252 // CHECK8:       cond.false:
12253 // CHECK8-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12254 // CHECK8-NEXT:    br label [[COND_END]]
12255 // CHECK8:       cond.end:
12256 // CHECK8-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
12257 // CHECK8-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12258 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12259 // CHECK8-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
12260 // CHECK8-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
12261 // CHECK8-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12262 // CHECK8-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
12263 // CHECK8-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
12264 // CHECK8:       omp.dispatch.body:
12265 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12266 // CHECK8:       omp.inner.for.cond:
12267 // CHECK8-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
12268 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
12269 // CHECK8-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
12270 // CHECK8-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12271 // CHECK8:       omp.inner.for.body:
12272 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
12273 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
12274 // CHECK8-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
12275 // CHECK8-NEXT:    [[CONV:%.*]] = trunc i32 [[SUB]] to i8
12276 // CHECK8-NEXT:    store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !36
12277 // CHECK8-NEXT:    [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !36
12278 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
12279 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group !36
12280 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
12281 // CHECK8-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !36
12282 // CHECK8-NEXT:    [[CONV7:%.*]] = fpext float [[TMP20]] to double
12283 // CHECK8-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
12284 // CHECK8-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
12285 // CHECK8-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !36
12286 // CHECK8-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
12287 // CHECK8-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
12288 // CHECK8-NEXT:    [[CONV11:%.*]] = fpext float [[TMP21]] to double
12289 // CHECK8-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
12290 // CHECK8-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
12291 // CHECK8-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !36
12292 // CHECK8-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
12293 // CHECK8-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
12294 // CHECK8-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
12295 // CHECK8-NEXT:    [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
12296 // CHECK8-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !36
12297 // CHECK8-NEXT:    [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
12298 // CHECK8-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
12299 // CHECK8-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
12300 // CHECK8-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
12301 // CHECK8-NEXT:    [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
12302 // CHECK8-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !36
12303 // CHECK8-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
12304 // CHECK8-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !36
12305 // CHECK8-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
12306 // CHECK8-NEXT:    store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !36
12307 // CHECK8-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
12308 // CHECK8-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !36
12309 // CHECK8-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
12310 // CHECK8-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
12311 // CHECK8-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
12312 // CHECK8-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !36
12313 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12314 // CHECK8:       omp.body.continue:
12315 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12316 // CHECK8:       omp.inner.for.inc:
12317 // CHECK8-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
12318 // CHECK8-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
12319 // CHECK8-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
12320 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
12321 // CHECK8:       omp.inner.for.end:
12322 // CHECK8-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
12323 // CHECK8:       omp.dispatch.inc:
12324 // CHECK8-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12325 // CHECK8-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12326 // CHECK8-NEXT:    [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
12327 // CHECK8-NEXT:    store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
12328 // CHECK8-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12329 // CHECK8-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
12330 // CHECK8-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
12331 // CHECK8-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
12332 // CHECK8-NEXT:    br label [[OMP_DISPATCH_COND]]
12333 // CHECK8:       omp.dispatch.end:
12334 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
12335 // CHECK8-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12336 // CHECK8-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
12337 // CHECK8-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12338 // CHECK8:       .omp.final.then:
12339 // CHECK8-NEXT:    store i8 96, i8* [[IT]], align 1
12340 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12341 // CHECK8:       .omp.final.done:
12342 // CHECK8-NEXT:    ret void
12343 //
12344 //
12345 // CHECK8-LABEL: define {{[^@]+}}@_Z3bari
12346 // CHECK8-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
12347 // CHECK8-NEXT:  entry:
12348 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12349 // CHECK8-NEXT:    [[A:%.*]] = alloca i32, align 4
12350 // CHECK8-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
12351 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12352 // CHECK8-NEXT:    store i32 0, i32* [[A]], align 4
12353 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
12354 // CHECK8-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
12355 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
12356 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
12357 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
12358 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
12359 // CHECK8-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]])
12360 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
12361 // CHECK8-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
12362 // CHECK8-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
12363 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
12364 // CHECK8-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
12365 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
12366 // CHECK8-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
12367 // CHECK8-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
12368 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
12369 // CHECK8-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
12370 // CHECK8-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
12371 // CHECK8-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
12372 // CHECK8-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
12373 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
12374 // CHECK8-NEXT:    ret i32 [[TMP8]]
12375 //
12376 //
12377 // CHECK8-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
12378 // CHECK8-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
12379 // CHECK8-NEXT:  entry:
12380 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
12381 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12382 // CHECK8-NEXT:    [[B:%.*]] = alloca i32, align 4
12383 // CHECK8-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
12384 // CHECK8-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
12385 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
12386 // CHECK8-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
12387 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
12388 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 4
12389 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 4
12390 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 4
12391 // CHECK8-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 4
12392 // CHECK8-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
12393 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12394 // CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
12395 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
12396 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
12397 // CHECK8-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
12398 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
12399 // CHECK8-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
12400 // CHECK8-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
12401 // CHECK8-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
12402 // CHECK8-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
12403 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
12404 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
12405 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60
12406 // CHECK8-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
12407 // CHECK8-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
12408 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B]], align 4
12409 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[B_CASTED]], align 4
12410 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4
12411 // CHECK8-NEXT:    [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
12412 // CHECK8-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
12413 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8*
12414 // CHECK8-NEXT:    [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL]] to i8
12415 // CHECK8-NEXT:    store i8 [[FROMBOOL2]], i8* [[CONV]], align 1
12416 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
12417 // CHECK8-NEXT:    [[TMP9:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
12418 // CHECK8-NEXT:    [[TOBOOL3:%.*]] = trunc i8 [[TMP9]] to i1
12419 // CHECK8-NEXT:    br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
12420 // CHECK8:       omp_if.then:
12421 // CHECK8-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
12422 // CHECK8-NEXT:    [[TMP10:%.*]] = mul nuw i32 2, [[TMP1]]
12423 // CHECK8-NEXT:    [[TMP11:%.*]] = mul nuw i32 [[TMP10]], 2
12424 // CHECK8-NEXT:    [[TMP12:%.*]] = sext i32 [[TMP11]] to i64
12425 // CHECK8-NEXT:    [[TMP13:%.*]] = bitcast [6 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
12426 // CHECK8-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP13]], i8* align 4 bitcast ([6 x i64]* @.offload_sizes.11 to i8*), i32 48, i1 false)
12427 // CHECK8-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12428 // CHECK8-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to %struct.S1**
12429 // CHECK8-NEXT:    store %struct.S1* [[THIS1]], %struct.S1** [[TMP15]], align 4
12430 // CHECK8-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12431 // CHECK8-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to double**
12432 // CHECK8-NEXT:    store double* [[A]], double** [[TMP17]], align 4
12433 // CHECK8-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
12434 // CHECK8-NEXT:    store i8* null, i8** [[TMP18]], align 4
12435 // CHECK8-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
12436 // CHECK8-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
12437 // CHECK8-NEXT:    store i32 [[TMP6]], i32* [[TMP20]], align 4
12438 // CHECK8-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
12439 // CHECK8-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32*
12440 // CHECK8-NEXT:    store i32 [[TMP6]], i32* [[TMP22]], align 4
12441 // CHECK8-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
12442 // CHECK8-NEXT:    store i8* null, i8** [[TMP23]], align 4
12443 // CHECK8-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
12444 // CHECK8-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
12445 // CHECK8-NEXT:    store i32 2, i32* [[TMP25]], align 4
12446 // CHECK8-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
12447 // CHECK8-NEXT:    [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
12448 // CHECK8-NEXT:    store i32 2, i32* [[TMP27]], align 4
12449 // CHECK8-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
12450 // CHECK8-NEXT:    store i8* null, i8** [[TMP28]], align 4
12451 // CHECK8-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
12452 // CHECK8-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
12453 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP30]], align 4
12454 // CHECK8-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
12455 // CHECK8-NEXT:    [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32*
12456 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP32]], align 4
12457 // CHECK8-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
12458 // CHECK8-NEXT:    store i8* null, i8** [[TMP33]], align 4
12459 // CHECK8-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
12460 // CHECK8-NEXT:    [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
12461 // CHECK8-NEXT:    store i16* [[VLA]], i16** [[TMP35]], align 4
12462 // CHECK8-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
12463 // CHECK8-NEXT:    [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
12464 // CHECK8-NEXT:    store i16* [[VLA]], i16** [[TMP37]], align 4
12465 // CHECK8-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
12466 // CHECK8-NEXT:    store i64 [[TMP12]], i64* [[TMP38]], align 4
12467 // CHECK8-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
12468 // CHECK8-NEXT:    store i8* null, i8** [[TMP39]], align 4
12469 // CHECK8-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
12470 // CHECK8-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
12471 // CHECK8-NEXT:    store i32 [[TMP8]], i32* [[TMP41]], align 4
12472 // CHECK8-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
12473 // CHECK8-NEXT:    [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32*
12474 // CHECK8-NEXT:    store i32 [[TMP8]], i32* [[TMP43]], align 4
12475 // CHECK8-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
12476 // CHECK8-NEXT:    store i8* null, i8** [[TMP44]], align 4
12477 // CHECK8-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12478 // CHECK8-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12479 // CHECK8-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
12480 // CHECK8-NEXT:    [[TMP48:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
12481 // CHECK8-NEXT:    [[TOBOOL4:%.*]] = trunc i8 [[TMP48]] to i1
12482 // CHECK8-NEXT:    [[TMP49:%.*]] = select i1 [[TOBOOL4]], i32 0, i32 1
12483 // CHECK8-NEXT:    [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP45]], i8** [[TMP46]], i64* [[TMP47]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP49]])
12484 // CHECK8-NEXT:    [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0
12485 // CHECK8-NEXT:    br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
12486 // CHECK8:       omp_offload.failed:
12487 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]]
12488 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT]]
12489 // CHECK8:       omp_offload.cont:
12490 // CHECK8-NEXT:    br label [[OMP_IF_END:%.*]]
12491 // CHECK8:       omp_if.else:
12492 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]]
12493 // CHECK8-NEXT:    br label [[OMP_IF_END]]
12494 // CHECK8:       omp_if.end:
12495 // CHECK8-NEXT:    [[TMP52:%.*]] = mul nsw i32 1, [[TMP1]]
12496 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP52]]
12497 // CHECK8-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
12498 // CHECK8-NEXT:    [[TMP53:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2
12499 // CHECK8-NEXT:    [[CONV6:%.*]] = sext i16 [[TMP53]] to i32
12500 // CHECK8-NEXT:    [[TMP54:%.*]] = load i32, i32* [[B]], align 4
12501 // CHECK8-NEXT:    [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP54]]
12502 // CHECK8-NEXT:    [[TMP55:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
12503 // CHECK8-NEXT:    call void @llvm.stackrestore(i8* [[TMP55]])
12504 // CHECK8-NEXT:    ret i32 [[ADD7]]
12505 //
12506 //
12507 // CHECK8-LABEL: define {{[^@]+}}@_ZL7fstatici
12508 // CHECK8-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
12509 // CHECK8-NEXT:  entry:
12510 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12511 // CHECK8-NEXT:    [[A:%.*]] = alloca i32, align 4
12512 // CHECK8-NEXT:    [[AA:%.*]] = alloca i16, align 2
12513 // CHECK8-NEXT:    [[AAA:%.*]] = alloca i8, align 1
12514 // CHECK8-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
12515 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
12516 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
12517 // CHECK8-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
12518 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
12519 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
12520 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
12521 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12522 // CHECK8-NEXT:    store i32 0, i32* [[A]], align 4
12523 // CHECK8-NEXT:    store i16 0, i16* [[AA]], align 2
12524 // CHECK8-NEXT:    store i8 0, i8* [[AAA]], align 1
12525 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
12526 // CHECK8-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
12527 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
12528 // CHECK8-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
12529 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
12530 // CHECK8-NEXT:    store i16 [[TMP2]], i16* [[CONV]], align 2
12531 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
12532 // CHECK8-NEXT:    [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
12533 // CHECK8-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
12534 // CHECK8-NEXT:    store i8 [[TMP4]], i8* [[CONV1]], align 1
12535 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
12536 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
12537 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
12538 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
12539 // CHECK8:       omp_if.then:
12540 // CHECK8-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12541 // CHECK8-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
12542 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
12543 // CHECK8-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12544 // CHECK8-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
12545 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP10]], align 4
12546 // CHECK8-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
12547 // CHECK8-NEXT:    store i8* null, i8** [[TMP11]], align 4
12548 // CHECK8-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
12549 // CHECK8-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
12550 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
12551 // CHECK8-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
12552 // CHECK8-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
12553 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP15]], align 4
12554 // CHECK8-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
12555 // CHECK8-NEXT:    store i8* null, i8** [[TMP16]], align 4
12556 // CHECK8-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
12557 // CHECK8-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
12558 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[TMP18]], align 4
12559 // CHECK8-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
12560 // CHECK8-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
12561 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[TMP20]], align 4
12562 // CHECK8-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
12563 // CHECK8-NEXT:    store i8* null, i8** [[TMP21]], align 4
12564 // CHECK8-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
12565 // CHECK8-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
12566 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4
12567 // CHECK8-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
12568 // CHECK8-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
12569 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4
12570 // CHECK8-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
12571 // CHECK8-NEXT:    store i8* null, i8** [[TMP26]], align 4
12572 // CHECK8-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12573 // CHECK8-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12574 // CHECK8-NEXT:    [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
12575 // CHECK8-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
12576 // CHECK8-NEXT:    br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
12577 // CHECK8:       omp_offload.failed:
12578 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
12579 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT]]
12580 // CHECK8:       omp_offload.cont:
12581 // CHECK8-NEXT:    br label [[OMP_IF_END:%.*]]
12582 // CHECK8:       omp_if.else:
12583 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
12584 // CHECK8-NEXT:    br label [[OMP_IF_END]]
12585 // CHECK8:       omp_if.end:
12586 // CHECK8-NEXT:    [[TMP31:%.*]] = load i32, i32* [[A]], align 4
12587 // CHECK8-NEXT:    ret i32 [[TMP31]]
12588 //
12589 //
12590 // CHECK8-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
12591 // CHECK8-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
12592 // CHECK8-NEXT:  entry:
12593 // CHECK8-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12594 // CHECK8-NEXT:    [[A:%.*]] = alloca i32, align 4
12595 // CHECK8-NEXT:    [[AA:%.*]] = alloca i16, align 2
12596 // CHECK8-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
12597 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
12598 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
12599 // CHECK8-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
12600 // CHECK8-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
12601 // CHECK8-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
12602 // CHECK8-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12603 // CHECK8-NEXT:    store i32 0, i32* [[A]], align 4
12604 // CHECK8-NEXT:    store i16 0, i16* [[AA]], align 2
12605 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
12606 // CHECK8-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
12607 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
12608 // CHECK8-NEXT:    [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
12609 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
12610 // CHECK8-NEXT:    store i16 [[TMP2]], i16* [[CONV]], align 2
12611 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
12612 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
12613 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
12614 // CHECK8-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
12615 // CHECK8:       omp_if.then:
12616 // CHECK8-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12617 // CHECK8-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
12618 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
12619 // CHECK8-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12620 // CHECK8-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
12621 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
12622 // CHECK8-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
12623 // CHECK8-NEXT:    store i8* null, i8** [[TMP9]], align 4
12624 // CHECK8-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
12625 // CHECK8-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
12626 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP11]], align 4
12627 // CHECK8-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
12628 // CHECK8-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
12629 // CHECK8-NEXT:    store i32 [[TMP3]], i32* [[TMP13]], align 4
12630 // CHECK8-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
12631 // CHECK8-NEXT:    store i8* null, i8** [[TMP14]], align 4
12632 // CHECK8-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
12633 // CHECK8-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
12634 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
12635 // CHECK8-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
12636 // CHECK8-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
12637 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
12638 // CHECK8-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
12639 // CHECK8-NEXT:    store i8* null, i8** [[TMP19]], align 4
12640 // CHECK8-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12641 // CHECK8-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12642 // CHECK8-NEXT:    [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
12643 // CHECK8-NEXT:    [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
12644 // CHECK8-NEXT:    br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
12645 // CHECK8:       omp_offload.failed:
12646 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
12647 // CHECK8-NEXT:    br label [[OMP_OFFLOAD_CONT]]
12648 // CHECK8:       omp_offload.cont:
12649 // CHECK8-NEXT:    br label [[OMP_IF_END:%.*]]
12650 // CHECK8:       omp_if.else:
12651 // CHECK8-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
12652 // CHECK8-NEXT:    br label [[OMP_IF_END]]
12653 // CHECK8:       omp_if.end:
12654 // CHECK8-NEXT:    [[TMP24:%.*]] = load i32, i32* [[A]], align 4
12655 // CHECK8-NEXT:    ret i32 [[TMP24]]
12656 //
12657 //
12658 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
12659 // CHECK8-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
12660 // CHECK8-NEXT:  entry:
12661 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
12662 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
12663 // CHECK8-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
12664 // CHECK8-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
12665 // CHECK8-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
12666 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
12667 // CHECK8-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
12668 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
12669 // CHECK8-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
12670 // CHECK8-NEXT:    [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
12671 // CHECK8-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
12672 // CHECK8-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
12673 // CHECK8-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
12674 // CHECK8-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
12675 // CHECK8-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
12676 // CHECK8-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
12677 // CHECK8-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
12678 // CHECK8-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
12679 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
12680 // CHECK8-NEXT:    [[TMP3:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
12681 // CHECK8-NEXT:    [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 4
12682 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
12683 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_ADDR]], align 4
12684 // CHECK8-NEXT:    store i32 [[TMP5]], i32* [[B_CASTED]], align 4
12685 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4
12686 // CHECK8-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV]], align 1
12687 // CHECK8-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
12688 // CHECK8-NEXT:    [[CONV3:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8*
12689 // CHECK8-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
12690 // CHECK8-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV3]], align 1
12691 // CHECK8-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
12692 // CHECK8-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV]], align 1
12693 // CHECK8-NEXT:    [[TOBOOL4:%.*]] = trunc i8 [[TMP9]] to i1
12694 // CHECK8-NEXT:    br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
12695 // CHECK8:       omp_if.then:
12696 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]])
12697 // CHECK8-NEXT:    br label [[OMP_IF_END:%.*]]
12698 // CHECK8:       omp_if.else:
12699 // CHECK8-NEXT:    call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
12700 // CHECK8-NEXT:    store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
12701 // CHECK8-NEXT:    store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
12702 // CHECK8-NEXT:    call void @.omp_outlined..10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) #[[ATTR4]]
12703 // CHECK8-NEXT:    call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
12704 // CHECK8-NEXT:    br label [[OMP_IF_END]]
12705 // CHECK8:       omp_if.end:
12706 // CHECK8-NEXT:    ret void
12707 //
12708 //
12709 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..10
12710 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
12711 // CHECK8-NEXT:  entry:
12712 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12713 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12714 // CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
12715 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
12716 // CHECK8-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
12717 // CHECK8-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
12718 // CHECK8-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
12719 // CHECK8-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
12720 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
12721 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i64, align 4
12722 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
12723 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
12724 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
12725 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12726 // CHECK8-NEXT:    [[IT:%.*]] = alloca i64, align 8
12727 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12728 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12729 // CHECK8-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
12730 // CHECK8-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
12731 // CHECK8-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
12732 // CHECK8-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
12733 // CHECK8-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
12734 // CHECK8-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
12735 // CHECK8-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
12736 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
12737 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
12738 // CHECK8-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
12739 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
12740 // CHECK8-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
12741 // CHECK8-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
12742 // CHECK8-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
12743 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12744 // CHECK8-NEXT:    [[TMP4:%.*]] = load i8, i8* [[CONV]], align 1
12745 // CHECK8-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1
12746 // CHECK8-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
12747 // CHECK8:       omp_if.then:
12748 // CHECK8-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12749 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
12750 // CHECK8-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
12751 // CHECK8-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
12752 // CHECK8-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3
12753 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12754 // CHECK8:       cond.true:
12755 // CHECK8-NEXT:    br label [[COND_END:%.*]]
12756 // CHECK8:       cond.false:
12757 // CHECK8-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
12758 // CHECK8-NEXT:    br label [[COND_END]]
12759 // CHECK8:       cond.end:
12760 // CHECK8-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
12761 // CHECK8-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
12762 // CHECK8-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
12763 // CHECK8-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8
12764 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12765 // CHECK8:       omp.inner.for.cond:
12766 // CHECK8-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
12767 // CHECK8-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !39
12768 // CHECK8-NEXT:    [[CMP3:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]]
12769 // CHECK8-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12770 // CHECK8:       omp.inner.for.body:
12771 // CHECK8-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
12772 // CHECK8-NEXT:    [[MUL:%.*]] = mul i64 [[TMP12]], 400
12773 // CHECK8-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
12774 // CHECK8-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !39
12775 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !39
12776 // CHECK8-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP13]] to double
12777 // CHECK8-NEXT:    [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
12778 // CHECK8-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
12779 // CHECK8-NEXT:    store double [[ADD]], double* [[A]], align 4, !nontemporal !40, !llvm.access.group !39
12780 // CHECK8-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
12781 // CHECK8-NEXT:    [[TMP14:%.*]] = load double, double* [[A5]], align 4, !nontemporal !40, !llvm.access.group !39
12782 // CHECK8-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
12783 // CHECK8-NEXT:    store double [[INC]], double* [[A5]], align 4, !nontemporal !40, !llvm.access.group !39
12784 // CHECK8-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
12785 // CHECK8-NEXT:    [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]]
12786 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP15]]
12787 // CHECK8-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
12788 // CHECK8-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !39
12789 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12790 // CHECK8:       omp.body.continue:
12791 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12792 // CHECK8:       omp.inner.for.inc:
12793 // CHECK8-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
12794 // CHECK8-NEXT:    [[ADD8:%.*]] = add i64 [[TMP16]], 1
12795 // CHECK8-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !39
12796 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
12797 // CHECK8:       omp.inner.for.end:
12798 // CHECK8-NEXT:    br label [[OMP_IF_END:%.*]]
12799 // CHECK8:       omp_if.else:
12800 // CHECK8-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12801 // CHECK8-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
12802 // CHECK8-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
12803 // CHECK8-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
12804 // CHECK8-NEXT:    [[CMP9:%.*]] = icmp ugt i64 [[TMP19]], 3
12805 // CHECK8-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
12806 // CHECK8:       cond.true10:
12807 // CHECK8-NEXT:    br label [[COND_END12:%.*]]
12808 // CHECK8:       cond.false11:
12809 // CHECK8-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
12810 // CHECK8-NEXT:    br label [[COND_END12]]
12811 // CHECK8:       cond.end12:
12812 // CHECK8-NEXT:    [[COND13:%.*]] = phi i64 [ 3, [[COND_TRUE10]] ], [ [[TMP20]], [[COND_FALSE11]] ]
12813 // CHECK8-NEXT:    store i64 [[COND13]], i64* [[DOTOMP_UB]], align 8
12814 // CHECK8-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
12815 // CHECK8-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8
12816 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND14:%.*]]
12817 // CHECK8:       omp.inner.for.cond14:
12818 // CHECK8-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
12819 // CHECK8-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
12820 // CHECK8-NEXT:    [[CMP15:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
12821 // CHECK8-NEXT:    br i1 [[CMP15]], label [[OMP_INNER_FOR_BODY16:%.*]], label [[OMP_INNER_FOR_END30:%.*]]
12822 // CHECK8:       omp.inner.for.body16:
12823 // CHECK8-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
12824 // CHECK8-NEXT:    [[MUL17:%.*]] = mul i64 [[TMP24]], 400
12825 // CHECK8-NEXT:    [[SUB18:%.*]] = sub i64 2000, [[MUL17]]
12826 // CHECK8-NEXT:    store i64 [[SUB18]], i64* [[IT]], align 8
12827 // CHECK8-NEXT:    [[TMP25:%.*]] = load i32, i32* [[B_ADDR]], align 4
12828 // CHECK8-NEXT:    [[CONV19:%.*]] = sitofp i32 [[TMP25]] to double
12829 // CHECK8-NEXT:    [[ADD20:%.*]] = fadd double [[CONV19]], 1.500000e+00
12830 // CHECK8-NEXT:    [[A21:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
12831 // CHECK8-NEXT:    store double [[ADD20]], double* [[A21]], align 4
12832 // CHECK8-NEXT:    [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
12833 // CHECK8-NEXT:    [[TMP26:%.*]] = load double, double* [[A22]], align 4
12834 // CHECK8-NEXT:    [[INC23:%.*]] = fadd double [[TMP26]], 1.000000e+00
12835 // CHECK8-NEXT:    store double [[INC23]], double* [[A22]], align 4
12836 // CHECK8-NEXT:    [[CONV24:%.*]] = fptosi double [[INC23]] to i16
12837 // CHECK8-NEXT:    [[TMP27:%.*]] = mul nsw i32 1, [[TMP2]]
12838 // CHECK8-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP27]]
12839 // CHECK8-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1
12840 // CHECK8-NEXT:    store i16 [[CONV24]], i16* [[ARRAYIDX26]], align 2
12841 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE27:%.*]]
12842 // CHECK8:       omp.body.continue27:
12843 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC28:%.*]]
12844 // CHECK8:       omp.inner.for.inc28:
12845 // CHECK8-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
12846 // CHECK8-NEXT:    [[ADD29:%.*]] = add i64 [[TMP28]], 1
12847 // CHECK8-NEXT:    store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8
12848 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND14]], !llvm.loop [[LOOP43:![0-9]+]]
12849 // CHECK8:       omp.inner.for.end30:
12850 // CHECK8-NEXT:    br label [[OMP_IF_END]]
12851 // CHECK8:       omp_if.end:
12852 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12853 // CHECK8:       omp.loop.exit:
12854 // CHECK8-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12855 // CHECK8-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
12856 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
12857 // CHECK8-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12858 // CHECK8-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
12859 // CHECK8-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12860 // CHECK8:       .omp.final.then:
12861 // CHECK8-NEXT:    store i64 400, i64* [[IT]], align 8
12862 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12863 // CHECK8:       .omp.final.done:
12864 // CHECK8-NEXT:    ret void
12865 //
12866 //
12867 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
12868 // CHECK8-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
12869 // CHECK8-NEXT:  entry:
12870 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
12871 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
12872 // CHECK8-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
12873 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
12874 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
12875 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
12876 // CHECK8-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
12877 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
12878 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
12879 // CHECK8-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
12880 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
12881 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
12882 // CHECK8-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
12883 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
12884 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
12885 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
12886 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
12887 // CHECK8-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
12888 // CHECK8-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
12889 // CHECK8-NEXT:    store i16 [[TMP3]], i16* [[CONV2]], align 2
12890 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
12891 // CHECK8-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 1
12892 // CHECK8-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
12893 // CHECK8-NEXT:    store i8 [[TMP5]], i8* [[CONV3]], align 1
12894 // CHECK8-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
12895 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
12896 // CHECK8-NEXT:    ret void
12897 //
12898 //
12899 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..13
12900 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
12901 // CHECK8-NEXT:  entry:
12902 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12903 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12904 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
12905 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
12906 // CHECK8-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
12907 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
12908 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12909 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12910 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12911 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12912 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
12913 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
12914 // CHECK8-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
12915 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
12916 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
12917 // CHECK8-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
12918 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
12919 // CHECK8-NEXT:    ret void
12920 //
12921 //
12922 // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
12923 // CHECK8-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
12924 // CHECK8-NEXT:  entry:
12925 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
12926 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
12927 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
12928 // CHECK8-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
12929 // CHECK8-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
12930 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
12931 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
12932 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
12933 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
12934 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
12935 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
12936 // CHECK8-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
12937 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
12938 // CHECK8-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
12939 // CHECK8-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
12940 // CHECK8-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
12941 // CHECK8-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
12942 // CHECK8-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
12943 // CHECK8-NEXT:    ret void
12944 //
12945 //
12946 // CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..16
12947 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
12948 // CHECK8-NEXT:  entry:
12949 // CHECK8-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12950 // CHECK8-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12951 // CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
12952 // CHECK8-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
12953 // CHECK8-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
12954 // CHECK8-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
12955 // CHECK8-NEXT:    [[TMP:%.*]] = alloca i64, align 4
12956 // CHECK8-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
12957 // CHECK8-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
12958 // CHECK8-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
12959 // CHECK8-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12960 // CHECK8-NEXT:    [[I:%.*]] = alloca i64, align 8
12961 // CHECK8-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12962 // CHECK8-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12963 // CHECK8-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
12964 // CHECK8-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
12965 // CHECK8-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
12966 // CHECK8-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
12967 // CHECK8-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
12968 // CHECK8-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
12969 // CHECK8-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
12970 // CHECK8-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
12971 // CHECK8-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12972 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12973 // CHECK8-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
12974 // CHECK8-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
12975 // CHECK8-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
12976 // CHECK8-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
12977 // CHECK8-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12978 // CHECK8:       cond.true:
12979 // CHECK8-NEXT:    br label [[COND_END:%.*]]
12980 // CHECK8:       cond.false:
12981 // CHECK8-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
12982 // CHECK8-NEXT:    br label [[COND_END]]
12983 // CHECK8:       cond.end:
12984 // CHECK8-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
12985 // CHECK8-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
12986 // CHECK8-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
12987 // CHECK8-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
12988 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12989 // CHECK8:       omp.inner.for.cond:
12990 // CHECK8-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45
12991 // CHECK8-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !45
12992 // CHECK8-NEXT:    [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
12993 // CHECK8-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12994 // CHECK8:       omp.inner.for.body:
12995 // CHECK8-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45
12996 // CHECK8-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
12997 // CHECK8-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
12998 // CHECK8-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !45
12999 // CHECK8-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !45
13000 // CHECK8-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
13001 // CHECK8-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !45
13002 // CHECK8-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !45
13003 // CHECK8-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
13004 // CHECK8-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
13005 // CHECK8-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
13006 // CHECK8-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !45
13007 // CHECK8-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
13008 // CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !45
13009 // CHECK8-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
13010 // CHECK8-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !45
13011 // CHECK8-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13012 // CHECK8:       omp.body.continue:
13013 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13014 // CHECK8:       omp.inner.for.inc:
13015 // CHECK8-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45
13016 // CHECK8-NEXT:    [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
13017 // CHECK8-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !45
13018 // CHECK8-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
13019 // CHECK8:       omp.inner.for.end:
13020 // CHECK8-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13021 // CHECK8:       omp.loop.exit:
13022 // CHECK8-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
13023 // CHECK8-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13024 // CHECK8-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
13025 // CHECK8-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13026 // CHECK8:       .omp.final.then:
13027 // CHECK8-NEXT:    store i64 11, i64* [[I]], align 8
13028 // CHECK8-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13029 // CHECK8:       .omp.final.done:
13030 // CHECK8-NEXT:    ret void
13031 //
13032 //
13033 // CHECK8-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
13034 // CHECK8-SAME: () #[[ATTR8:[0-9]+]] {
13035 // CHECK8-NEXT:  entry:
13036 // CHECK8-NEXT:    call void @__tgt_register_requires(i64 1)
13037 // CHECK8-NEXT:    ret void
13038 //
13039 //
13040 // CHECK9-LABEL: define {{[^@]+}}@_Z7get_valv
13041 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
13042 // CHECK9-NEXT:  entry:
13043 // CHECK9-NEXT:    ret i64 0
13044 //
13045 //
13046 // CHECK9-LABEL: define {{[^@]+}}@_Z3fooi
13047 // CHECK9-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
13048 // CHECK9-NEXT:  entry:
13049 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13050 // CHECK9-NEXT:    [[A:%.*]] = alloca i32, align 4
13051 // CHECK9-NEXT:    [[AA:%.*]] = alloca i16, align 2
13052 // CHECK9-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
13053 // CHECK9-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
13054 // CHECK9-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
13055 // CHECK9-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
13056 // CHECK9-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
13057 // CHECK9-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
13058 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13059 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13060 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13061 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13062 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
13063 // CHECK9-NEXT:    [[K:%.*]] = alloca i64, align 8
13064 // CHECK9-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
13065 // CHECK9-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
13066 // CHECK9-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
13067 // CHECK9-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
13068 // CHECK9-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
13069 // CHECK9-NEXT:    [[I7:%.*]] = alloca i32, align 4
13070 // CHECK9-NEXT:    [[K8:%.*]] = alloca i64, align 8
13071 // CHECK9-NEXT:    [[LIN:%.*]] = alloca i32, align 4
13072 // CHECK9-NEXT:    [[_TMP20:%.*]] = alloca i64, align 8
13073 // CHECK9-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
13074 // CHECK9-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
13075 // CHECK9-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
13076 // CHECK9-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
13077 // CHECK9-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
13078 // CHECK9-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
13079 // CHECK9-NEXT:    [[IT:%.*]] = alloca i64, align 8
13080 // CHECK9-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
13081 // CHECK9-NEXT:    [[A28:%.*]] = alloca i32, align 4
13082 // CHECK9-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
13083 // CHECK9-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
13084 // CHECK9-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
13085 // CHECK9-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
13086 // CHECK9-NEXT:    [[IT53:%.*]] = alloca i16, align 2
13087 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13088 // CHECK9-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
13089 // CHECK9-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
13090 // CHECK9-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
13091 // CHECK9-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
13092 // CHECK9-NEXT:    [[IT72:%.*]] = alloca i8, align 1
13093 // CHECK9-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13094 // CHECK9-NEXT:    store i32 0, i32* [[A]], align 4
13095 // CHECK9-NEXT:    store i16 0, i16* [[AA]], align 2
13096 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
13097 // CHECK9-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
13098 // CHECK9-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
13099 // CHECK9-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
13100 // CHECK9-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
13101 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
13102 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
13103 // CHECK9-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
13104 // CHECK9-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
13105 // CHECK9-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
13106 // CHECK9-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
13107 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13108 // CHECK9-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
13109 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13110 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
13111 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13112 // CHECK9:       omp.inner.for.cond:
13113 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
13114 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
13115 // CHECK9-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
13116 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13117 // CHECK9:       omp.inner.for.body:
13118 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
13119 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5
13120 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
13121 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
13122 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13123 // CHECK9:       omp.body.continue:
13124 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13125 // CHECK9:       omp.inner.for.inc:
13126 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
13127 // CHECK9-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
13128 // CHECK9-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
13129 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
13130 // CHECK9:       omp.inner.for.end:
13131 // CHECK9-NEXT:    store i32 33, i32* [[I]], align 4
13132 // CHECK9-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
13133 // CHECK9-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
13134 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
13135 // CHECK9-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
13136 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
13137 // CHECK9-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4
13138 // CHECK9-NEXT:    [[TMP12:%.*]] = load i64, i64* [[K]], align 8
13139 // CHECK9-NEXT:    store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8
13140 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
13141 // CHECK9:       omp.inner.for.cond9:
13142 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
13143 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6
13144 // CHECK9-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
13145 // CHECK9-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
13146 // CHECK9:       omp.inner.for.body11:
13147 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
13148 // CHECK9-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
13149 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
13150 // CHECK9-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6
13151 // CHECK9-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6
13152 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
13153 // CHECK9-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
13154 // CHECK9-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
13155 // CHECK9-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
13156 // CHECK9-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6
13157 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6
13158 // CHECK9-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
13159 // CHECK9-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6
13160 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
13161 // CHECK9:       omp.body.continue16:
13162 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
13163 // CHECK9:       omp.inner.for.inc17:
13164 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
13165 // CHECK9-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
13166 // CHECK9-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK9:       omp.inner.for.end19:
// CHECK9-NEXT:    store i32 1, i32* [[I7]], align 4
// CHECK9-NEXT:    [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
// CHECK9-NEXT:    store i64 [[TMP20]], i64* [[K]], align 8
// CHECK9-NEXT:    store i32 12, i32* [[LIN]], align 4
// CHECK9-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK9-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK9-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK9-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
// CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK9-NEXT:    store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
// CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT:    store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
// CHECK9-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK9-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK9:       omp.inner.for.cond29:
// CHECK9-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK9-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9
// CHECK9-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
// CHECK9-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK9:       omp.inner.for.body31:
// CHECK9-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK9-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP26]], 400
// CHECK9-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK9-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9
// CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9
// CHECK9-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
// CHECK9-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK9-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK9-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
// CHECK9-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK9-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK9-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9
// CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9
// CHECK9-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
// CHECK9-NEXT:    [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK9-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK9-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
// CHECK9-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK9-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK9-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9
// CHECK9-NEXT:    [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
// CHECK9-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
// CHECK9-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK9-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK9-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9
// CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK9:       omp.body.continue45:
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK9:       omp.inner.for.inc46:
// CHECK9-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK9-NEXT:    [[ADD47:%.*]] = add i64 [[TMP34]], 1
// CHECK9-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK9:       omp.inner.for.end48:
// CHECK9-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK9-NEXT:    store i32 [[TMP35]], i32* [[LIN]], align 4
// CHECK9-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
// CHECK9-NEXT:    store i32 [[TMP36]], i32* [[A]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK9-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK9-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK9-NEXT:    store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK9:       omp.inner.for.cond54:
// CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK9-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12
// CHECK9-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
// CHECK9-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK9:       omp.inner.for.body56:
// CHECK9-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK9-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
// CHECK9-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK9-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK9-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12
// CHECK9-NEXT:    [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
// CHECK9-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK9-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12
// CHECK9-NEXT:    [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
// CHECK9-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
// CHECK9-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK9-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK9-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12
// CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK9:       omp.body.continue64:
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK9:       omp.inner.for.inc65:
// CHECK9-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK9-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
// CHECK9-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK9:       omp.inner.for.end67:
// CHECK9-NEXT:    store i16 22, i16* [[IT53]], align 2
// CHECK9-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK9-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK9-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK9-NEXT:    store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK9:       omp.inner.for.cond73:
// CHECK9-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK9-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15
// CHECK9-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
// CHECK9-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK9:       omp.inner.for.body75:
// CHECK9-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK9-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
// CHECK9-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK9-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK9-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15
// CHECK9-NEXT:    [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
// CHECK9-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
// CHECK9-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15
// CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK9-NEXT:    [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK9-NEXT:    [[CONV80:%.*]] = fpext float [[TMP50]] to double
// CHECK9-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK9-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK9-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK9-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK9-NEXT:    [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK9-NEXT:    [[CONV84:%.*]] = fpext float [[TMP51]] to double
// CHECK9-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
// CHECK9-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
// CHECK9-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK9-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK9-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2
// CHECK9-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK9-NEXT:    [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK9-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK9-NEXT:    [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK9-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]]
// CHECK9-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3
// CHECK9-NEXT:    [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK9-NEXT:    [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00
// CHECK9-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK9-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK9-NEXT:    [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
// CHECK9-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1
// CHECK9-NEXT:    store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15
// CHECK9-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK9-NEXT:    [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
// CHECK9-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP56]] to i32
// CHECK9-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK9-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK9-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15
// CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK9:       omp.body.continue97:
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK9:       omp.inner.for.inc98:
// CHECK9-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK9-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1
// CHECK9-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK9:       omp.inner.for.end100:
// CHECK9-NEXT:    store i8 96, i8* [[IT72]], align 1
// CHECK9-NEXT:    [[TMP58:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT:    [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK9-NEXT:    call void @llvm.stackrestore(i8* [[TMP59]])
// CHECK9-NEXT:    ret i32 [[TMP58]]
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z3bari
// CHECK9-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK9-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK9-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
// CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK9-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
// CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK9-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]])
// CHECK9-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK9-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
// CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK9-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
// CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK9-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
// CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK9-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
// CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK9-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
// CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK9-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK9-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[B:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK9-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK9-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK9-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK9-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
// CHECK9-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK9-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK9-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK9-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK9-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK9-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK9-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK9-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK9-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK9-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK9-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9:       omp.inner.for.cond:
// CHECK9-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK9-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
// CHECK9-NEXT:    [[CMP:%.*]] = icmp ule i64 [[TMP6]], [[TMP7]]
// CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9:       omp.inner.for.body:
// CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK9-NEXT:    [[MUL:%.*]] = mul i64 [[TMP8]], 400
// CHECK9-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK9-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
// CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
// CHECK9-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP9]] to double
// CHECK9-NEXT:    [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK9-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK9-NEXT:    store double [[ADD2]], double* [[A]], align 8, !llvm.access.group !18
// CHECK9-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK9-NEXT:    [[TMP10:%.*]] = load double, double* [[A3]], align 8, !llvm.access.group !18
// CHECK9-NEXT:    [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00
// CHECK9-NEXT:    store double [[INC]], double* [[A3]], align 8, !llvm.access.group !18
// CHECK9-NEXT:    [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK9-NEXT:    [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP11]]
// CHECK9-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK9-NEXT:    store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !18
// CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9:       omp.body.continue:
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9:       omp.inner.for.inc:
// CHECK9-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK9-NEXT:    [[ADD6:%.*]] = add i64 [[TMP12]], 1
// CHECK9-NEXT:    store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK9:       omp.inner.for.end:
// CHECK9-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK9-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK9-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
// CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i64 1
// CHECK9-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2
// CHECK9-NEXT:    [[CONV9:%.*]] = sext i16 [[TMP14]] to i32
// CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[B]], align 4
// CHECK9-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP15]]
// CHECK9-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK9-NEXT:    call void @llvm.stackrestore(i8* [[TMP16]])
// CHECK9-NEXT:    ret i32 [[ADD10]]
//
//
// CHECK9-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK9-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK9-NEXT:    [[AAA:%.*]] = alloca i8, align 1
// CHECK9-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK9-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK9-NEXT:    store i8 0, i8* [[AAA]], align 1
// CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT:    ret i32 [[TMP0]]
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK9-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK9-NEXT:  entry:
// CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK9-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK9-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK9-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK9-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK9-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK9-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK9-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK9-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK9-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK9-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9:       omp.inner.for.cond:
// CHECK9-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK9-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !21
// CHECK9-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9:       omp.inner.for.body:
// CHECK9-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK9-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK9-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !21
// CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !21
// CHECK9-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK9-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !21
// CHECK9-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !21
// CHECK9-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK9-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK9-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK9-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !21
// CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
// CHECK9-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK9-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
// CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9:       omp.body.continue:
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9:       omp.inner.for.inc:
// CHECK9-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK9-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK9-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK9:       omp.inner.for.end:
// CHECK9-NEXT:    store i64 11, i64* [[I]], align 8
// CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK10-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK10-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK10-NEXT:  entry:
// CHECK10-NEXT:    ret i64 0
//
//
// CHECK10-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK10-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT:  entry:
// CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK10-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
// CHECK10-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK10-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK10-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[K:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[I7:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[K8:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[LIN:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[_TMP20:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[A28:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
// CHECK10-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[IT53:%.*]] = alloca i16, align 2
// CHECK10-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
// CHECK10-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[IT72:%.*]] = alloca i8, align 1
// CHECK10-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK10-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK10-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK10-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK10-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK10-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK10-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK10-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK10-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK10-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK10-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK10-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK10:       omp.inner.for.cond:
// CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
// CHECK10-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK10:       omp.inner.for.body:
// CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5
// CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK10-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
// CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK10:       omp.body.continue:
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK10:       omp.inner.for.inc:
// CHECK10-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK10-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK10-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK10:       omp.inner.for.end:
// CHECK10-NEXT:    store i32 33, i32* [[I]], align 4
// CHECK10-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK10-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
// CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
// CHECK10-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
// CHECK10-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
// CHECK10-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4
// CHECK10-NEXT:    [[TMP12:%.*]] = load i64, i64* [[K]], align 8
// CHECK10-NEXT:    store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
// CHECK10:       omp.inner.for.cond9:
// CHECK10-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK10-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6
// CHECK10-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK10-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
// CHECK10:       omp.inner.for.body11:
// CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK10-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK10-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK10-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6
// CHECK10-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6
// CHECK10-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK10-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
// CHECK10-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK10-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
// CHECK10-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6
// CHECK10-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6
// CHECK10-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK10-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6
// CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK10:       omp.body.continue16:
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK10:       omp.inner.for.inc17:
// CHECK10-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK10-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK10-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK10:       omp.inner.for.end19:
// CHECK10-NEXT:    store i32 1, i32* [[I7]], align 4
// CHECK10-NEXT:    [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
// CHECK10-NEXT:    store i64 [[TMP20]], i64* [[K]], align 8
// CHECK10-NEXT:    store i32 12, i32* [[LIN]], align 4
// CHECK10-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK10-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK10-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK10-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
// CHECK10-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK10-NEXT:    store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
// CHECK10-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A]], align 4
// CHECK10-NEXT:    store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
// CHECK10-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK10-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK10:       omp.inner.for.cond29:
// CHECK10-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK10-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9
// CHECK10-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
// CHECK10-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK10:       omp.inner.for.body31:
// CHECK10-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK10-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP26]], 400
// CHECK10-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK10-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9
// CHECK10-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9
// CHECK10-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
// CHECK10-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK10-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK10-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
// CHECK10-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK10-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK10-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9
// CHECK10-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9
// CHECK10-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
// CHECK10-NEXT:    [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK10-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK10-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
// CHECK10-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK10-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK10-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9
// CHECK10-NEXT:    [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
// CHECK10-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
// CHECK10-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK10-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK10-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9
// CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK10:       omp.body.continue45:
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK10:       omp.inner.for.inc46:
// CHECK10-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK10-NEXT:    [[ADD47:%.*]] = add i64 [[TMP34]], 1
// CHECK10-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK10:       omp.inner.for.end48:
// CHECK10-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK10-NEXT:    [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK10-NEXT:    store i32 [[TMP35]], i32* [[LIN]], align 4
// CHECK10-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
// CHECK10-NEXT:    store i32 [[TMP36]], i32* [[A]], align 4
// CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK10-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK10-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK10-NEXT:    store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK10:       omp.inner.for.cond54:
// CHECK10-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK10-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12
// CHECK10-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
// CHECK10-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK10:       omp.inner.for.body56:
// CHECK10-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK10-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
// CHECK10-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK10-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK10-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12
// CHECK10-NEXT:    [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
// CHECK10-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK10-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12
// CHECK10-NEXT:    [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
// CHECK10-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
// CHECK10-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK10-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK10-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12
// CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK10:       omp.body.continue64:
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK10:       omp.inner.for.inc65:
// CHECK10-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK10-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
// CHECK10-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK10:       omp.inner.for.end67:
// CHECK10-NEXT:    store i16 22, i16* [[IT53]], align 2
// CHECK10-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
// CHECK10-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK10-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK10-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK10-NEXT:    store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK10:       omp.inner.for.cond73:
// CHECK10-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK10-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15
// CHECK10-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
// CHECK10-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK10:       omp.inner.for.body75:
// CHECK10-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK10-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
// CHECK10-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK10-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK10-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15
// CHECK10-NEXT:    [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
// CHECK10-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
// CHECK10-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15
// CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK10-NEXT:    [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK10-NEXT:    [[CONV80:%.*]] = fpext float [[TMP50]] to double
// CHECK10-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK10-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK10-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK10-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK10-NEXT:    [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK10-NEXT:    [[CONV84:%.*]] = fpext float [[TMP51]] to double
// CHECK10-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
// CHECK10-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
// CHECK10-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK10-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK10-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2
// CHECK10-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK10-NEXT:    [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK10-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK10-NEXT:    [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK10-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]]
// CHECK10-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3
// CHECK10-NEXT:    [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK10-NEXT:    [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00
// CHECK10-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK10-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK10-NEXT:    [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
// CHECK10-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1
// CHECK10-NEXT:    store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15
// CHECK10-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK10-NEXT:    [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
// CHECK10-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP56]] to i32
// CHECK10-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK10-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK10-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15
// CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK10:       omp.body.continue97:
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK10:       omp.inner.for.inc98:
// CHECK10-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK10-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1
// CHECK10-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK10:       omp.inner.for.end100:
// CHECK10-NEXT:    store i8 96, i8* [[IT72]], align 1
// CHECK10-NEXT:    [[TMP58:%.*]] = load i32, i32* [[A]], align 4
// CHECK10-NEXT:    [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK10-NEXT:    call void @llvm.stackrestore(i8* [[TMP59]])
// CHECK10-NEXT:    ret i32 [[TMP58]]
//
//
// CHECK10-LABEL: define {{[^@]+}}@_Z3bari
// CHECK10-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT:  entry:
// CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK10-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK10-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK10-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
// CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK10-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
// CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK10-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]])
// CHECK10-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK10-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK10-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
// CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK10-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
// CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK10-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK10-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
// CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK10-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
// CHECK10-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK10-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK10-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
// CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK10-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK10-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK10-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK10-NEXT:  entry:
// CHECK10-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[B:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK10-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK10-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK10-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK10-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK10-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
// CHECK10-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK10-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK10-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK10-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK10-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK10-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK10-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK10-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK10-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK10-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK10-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK10:       omp.inner.for.cond:
// CHECK10-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK10-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
// CHECK10-NEXT:    [[CMP:%.*]] = icmp ule i64 [[TMP6]], [[TMP7]]
// CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK10:       omp.inner.for.body:
// CHECK10-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK10-NEXT:    [[MUL:%.*]] = mul i64 [[TMP8]], 400
// CHECK10-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK10-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
// CHECK10-NEXT:    [[TMP9:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
// CHECK10-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP9]] to double
// CHECK10-NEXT:    [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK10-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK10-NEXT:    store double [[ADD2]], double* [[A]], align 8, !llvm.access.group !18
// CHECK10-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK10-NEXT:    [[TMP10:%.*]] = load double, double* [[A3]], align 8, !llvm.access.group !18
// CHECK10-NEXT:    [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00
// CHECK10-NEXT:    store double [[INC]], double* [[A3]], align 8, !llvm.access.group !18
// CHECK10-NEXT:    [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK10-NEXT:    [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP11]]
// CHECK10-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK10-NEXT:    store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !18
// CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK10:       omp.body.continue:
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK10:       omp.inner.for.inc:
// CHECK10-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK10-NEXT:    [[ADD6:%.*]] = add i64 [[TMP12]], 1
// CHECK10-NEXT:    store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK10:       omp.inner.for.end:
// CHECK10-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK10-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK10-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
// CHECK10-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i64 1
// CHECK10-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2
// CHECK10-NEXT:    [[CONV9:%.*]] = sext i16 [[TMP14]] to i32
// CHECK10-NEXT:    [[TMP15:%.*]] = load i32, i32* [[B]], align 4
// CHECK10-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP15]]
// CHECK10-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK10-NEXT:    call void @llvm.stackrestore(i8* [[TMP16]])
// CHECK10-NEXT:    ret i32 [[ADD10]]
//
//
// CHECK10-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK10-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT:  entry:
// CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK10-NEXT:    [[AAA:%.*]] = alloca i8, align 1
// CHECK10-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK10-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK10-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK10-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK10-NEXT:    store i8 0, i8* [[AAA]], align 1
// CHECK10-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK10-NEXT:    ret i32 [[TMP0]]
//
//
// CHECK10-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK10-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK10-NEXT:  entry:
// CHECK10-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK10-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK10-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK10-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK10-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK10-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK10-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK10-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK10-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK10-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK10-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK10:       omp.inner.for.cond:
// CHECK10-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK10-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !21
// CHECK10-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK10-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK10:       omp.inner.for.body:
// CHECK10-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK10-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK10-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK10-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !21
// CHECK10-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !21
// CHECK10-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK10-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !21
// CHECK10-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !21
// CHECK10-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK10-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK10-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK10-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !21
// CHECK10-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK10-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
// CHECK10-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK10-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
// CHECK10-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK10:       omp.body.continue:
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK10:       omp.inner.for.inc:
// CHECK10-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK10-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK10-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
// CHECK10-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK10:       omp.inner.for.end:
// CHECK10-NEXT:    store i64 11, i64* [[I]], align 8
// CHECK10-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK10-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK11-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK11-NEXT:  entry:
// CHECK11-NEXT:    ret i64 0
//
//
// CHECK11-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK11-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT:  entry:
// CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK11-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
// CHECK11-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK11-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK11-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[K:%.*]] = alloca i64, align 8
// CHECK11-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK11-NEXT:    [[I7:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[K8:%.*]] = alloca i64, align 8
// CHECK11-NEXT:    [[LIN:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[_TMP20:%.*]] = alloca i64, align 4
// CHECK11-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
// CHECK11-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
// CHECK11-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
// CHECK11-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK11-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK11-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[A28:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
// CHECK11-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[IT53:%.*]] = alloca i16, align 2
// CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
// CHECK11-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
// CHECK11-NEXT:    [[IT72:%.*]] = alloca i8, align 1
// CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK11-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK11-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK11-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
14056 // CHECK11-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
14057 // CHECK11-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
14058 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
14059 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
14060 // CHECK11-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
14061 // CHECK11-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
14062 // CHECK11-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
14063 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14064 // CHECK11-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
14065 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14066 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
14067 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14068 // CHECK11:       omp.inner.for.cond:
14069 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
14070 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
14071 // CHECK11-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
14072 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14073 // CHECK11:       omp.inner.for.body:
14074 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
14075 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
14076 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
14077 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
14078 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14079 // CHECK11:       omp.body.continue:
14080 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14081 // CHECK11:       omp.inner.for.inc:
14082 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
14083 // CHECK11-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
14084 // CHECK11-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
14085 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
14086 // CHECK11:       omp.inner.for.end:
14087 // CHECK11-NEXT:    store i32 33, i32* [[I]], align 4
14088 // CHECK11-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
14089 // CHECK11-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
14090 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
14091 // CHECK11-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
14092 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
14093 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4
14094 // CHECK11-NEXT:    [[TMP10:%.*]] = load i64, i64* [[K]], align 8
14095 // CHECK11-NEXT:    store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8
14096 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
14097 // CHECK11:       omp.inner.for.cond9:
14098 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
14099 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7
14100 // CHECK11-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
14101 // CHECK11-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
14102 // CHECK11:       omp.inner.for.body11:
14103 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
14104 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1
14105 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
14106 // CHECK11-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7
14107 // CHECK11-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7
14108 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
14109 // CHECK11-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3
14110 // CHECK11-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
14111 // CHECK11-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]]
14112 // CHECK11-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7
14113 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7
14114 // CHECK11-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
14115 // CHECK11-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7
14116 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
14117 // CHECK11:       omp.body.continue16:
14118 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
14119 // CHECK11:       omp.inner.for.inc17:
14120 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
14121 // CHECK11-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
14122 // CHECK11-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
14123 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]]
14124 // CHECK11:       omp.inner.for.end19:
14125 // CHECK11-NEXT:    store i32 1, i32* [[I7]], align 4
14126 // CHECK11-NEXT:    [[TMP18:%.*]] = load i64, i64* [[K8]], align 8
14127 // CHECK11-NEXT:    store i64 [[TMP18]], i64* [[K]], align 8
14128 // CHECK11-NEXT:    store i32 12, i32* [[LIN]], align 4
14129 // CHECK11-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
14130 // CHECK11-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
14131 // CHECK11-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
14132 // CHECK11-NEXT:    store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8
14133 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4
14134 // CHECK11-NEXT:    store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4
14135 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
14136 // CHECK11-NEXT:    store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4
14137 // CHECK11-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
14138 // CHECK11-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
14139 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
14140 // CHECK11:       omp.inner.for.cond29:
14141 // CHECK11-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
14142 // CHECK11-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10
14143 // CHECK11-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
14144 // CHECK11-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
14145 // CHECK11:       omp.inner.for.body31:
14146 // CHECK11-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
14147 // CHECK11-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP24]], 400
14148 // CHECK11-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
14149 // CHECK11-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10
14150 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10
14151 // CHECK11-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP25]] to i64
14152 // CHECK11-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
14153 // CHECK11-NEXT:    [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
14154 // CHECK11-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]]
14155 // CHECK11-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
14156 // CHECK11-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
14157 // CHECK11-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10
14158 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10
14159 // CHECK11-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP28]] to i64
14160 // CHECK11-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
14161 // CHECK11-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
14162 // CHECK11-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]]
14163 // CHECK11-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
14164 // CHECK11-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
14165 // CHECK11-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10
14166 // CHECK11-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
14167 // CHECK11-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP31]] to i32
14168 // CHECK11-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
14169 // CHECK11-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
14170 // CHECK11-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10
14171 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
14172 // CHECK11:       omp.body.continue45:
14173 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
14174 // CHECK11:       omp.inner.for.inc46:
14175 // CHECK11-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
14176 // CHECK11-NEXT:    [[ADD47:%.*]] = add i64 [[TMP32]], 1
14177 // CHECK11-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
14178 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]]
14179 // CHECK11:       omp.inner.for.end48:
14180 // CHECK11-NEXT:    store i64 400, i64* [[IT]], align 8
14181 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4
14182 // CHECK11-NEXT:    store i32 [[TMP33]], i32* [[LIN]], align 4
14183 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[A28]], align 4
14184 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[A]], align 4
14185 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
14186 // CHECK11-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
14187 // CHECK11-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
14188 // CHECK11-NEXT:    store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4
14189 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
14190 // CHECK11:       omp.inner.for.cond54:
14191 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
14192 // CHECK11-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13
14193 // CHECK11-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]]
14194 // CHECK11-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
14195 // CHECK11:       omp.inner.for.body56:
14196 // CHECK11-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
14197 // CHECK11-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4
14198 // CHECK11-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
14199 // CHECK11-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
14200 // CHECK11-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13
14201 // CHECK11-NEXT:    [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
14202 // CHECK11-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1
14203 // CHECK11-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13
14204 // CHECK11-NEXT:    [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
14205 // CHECK11-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP40]] to i32
14206 // CHECK11-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
14207 // CHECK11-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
14208 // CHECK11-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13
14209 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
14210 // CHECK11:       omp.body.continue64:
14211 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
14212 // CHECK11:       omp.inner.for.inc65:
14213 // CHECK11-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
14214 // CHECK11-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1
14215 // CHECK11-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
14216 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]]
14217 // CHECK11:       omp.inner.for.end67:
14218 // CHECK11-NEXT:    store i16 22, i16* [[IT53]], align 2
14219 // CHECK11-NEXT:    [[TMP42:%.*]] = load i32, i32* [[A]], align 4
14220 // CHECK11-NEXT:    store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4
14221 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
14222 // CHECK11-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
14223 // CHECK11-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
14224 // CHECK11-NEXT:    store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4
14225 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
14226 // CHECK11:       omp.inner.for.cond73:
14227 // CHECK11-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
14228 // CHECK11-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16
14229 // CHECK11-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]]
14230 // CHECK11-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
14231 // CHECK11:       omp.inner.for.body75:
14232 // CHECK11-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
14233 // CHECK11-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1
14234 // CHECK11-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
14235 // CHECK11-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
14236 // CHECK11-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16
14237 // CHECK11-NEXT:    [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
14238 // CHECK11-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1
14239 // CHECK11-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16
14240 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
14241 // CHECK11-NEXT:    [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
14242 // CHECK11-NEXT:    [[CONV80:%.*]] = fpext float [[TMP48]] to double
14243 // CHECK11-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
14244 // CHECK11-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
14245 // CHECK11-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
14246 // CHECK11-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
14247 // CHECK11-NEXT:    [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
14248 // CHECK11-NEXT:    [[CONV84:%.*]] = fpext float [[TMP49]] to double
14249 // CHECK11-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
14250 // CHECK11-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
14251 // CHECK11-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
14252 // CHECK11-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
14253 // CHECK11-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2
14254 // CHECK11-NEXT:    [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
14255 // CHECK11-NEXT:    [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00
14256 // CHECK11-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
14257 // CHECK11-NEXT:    [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]]
14258 // CHECK11-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]]
14259 // CHECK11-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3
14260 // CHECK11-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
14261 // CHECK11-NEXT:    [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00
14262 // CHECK11-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
14263 // CHECK11-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
14264 // CHECK11-NEXT:    [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
14265 // CHECK11-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1
14266 // CHECK11-NEXT:    store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16
14267 // CHECK11-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
14268 // CHECK11-NEXT:    [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
14269 // CHECK11-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP54]] to i32
14270 // CHECK11-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
14271 // CHECK11-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
14272 // CHECK11-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16
14273 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
14274 // CHECK11:       omp.body.continue97:
14275 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
14276 // CHECK11:       omp.inner.for.inc98:
14277 // CHECK11-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
14278 // CHECK11-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1
14279 // CHECK11-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
14280 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]]
14281 // CHECK11:       omp.inner.for.end100:
14282 // CHECK11-NEXT:    store i8 96, i8* [[IT72]], align 1
14283 // CHECK11-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A]], align 4
14284 // CHECK11-NEXT:    [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
14285 // CHECK11-NEXT:    call void @llvm.stackrestore(i8* [[TMP57]])
14286 // CHECK11-NEXT:    ret i32 [[TMP56]]
14287 //
14288 //
14289 // CHECK11-LABEL: define {{[^@]+}}@_Z3bari
14290 // CHECK11-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
14291 // CHECK11-NEXT:  entry:
14292 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14293 // CHECK11-NEXT:    [[A:%.*]] = alloca i32, align 4
14294 // CHECK11-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
14295 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14296 // CHECK11-NEXT:    store i32 0, i32* [[A]], align 4
14297 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
14298 // CHECK11-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
14299 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
14300 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
14301 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
14302 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
14303 // CHECK11-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]])
14304 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
14305 // CHECK11-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
14306 // CHECK11-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
14307 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
14308 // CHECK11-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
14309 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
14310 // CHECK11-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
14311 // CHECK11-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
14312 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
14313 // CHECK11-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
14314 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
14315 // CHECK11-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
14316 // CHECK11-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
14317 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
14318 // CHECK11-NEXT:    ret i32 [[TMP8]]
14319 //
14320 //
14321 // CHECK11-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
14322 // CHECK11-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
14323 // CHECK11-NEXT:  entry:
14324 // CHECK11-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
14325 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14326 // CHECK11-NEXT:    [[B:%.*]] = alloca i32, align 4
14327 // CHECK11-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
14328 // CHECK11-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
14329 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i64, align 4
14330 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
14331 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
14332 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
14333 // CHECK11-NEXT:    [[IT:%.*]] = alloca i64, align 8
14334 // CHECK11-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
14335 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14336 // CHECK11-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
14337 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
14338 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
14339 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
14340 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
14341 // CHECK11-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
14342 // CHECK11-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
14343 // CHECK11-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
14344 // CHECK11-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
14345 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
14346 // CHECK11-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
14347 // CHECK11-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
14348 // CHECK11-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
14349 // CHECK11-NEXT:    store i64 [[TMP4]], i64* [[DOTOMP_IV]], align 8
14350 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14351 // CHECK11:       omp.inner.for.cond:
14352 // CHECK11-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
14353 // CHECK11-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19
14354 // CHECK11-NEXT:    [[CMP:%.*]] = icmp ule i64 [[TMP5]], [[TMP6]]
14355 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14356 // CHECK11:       omp.inner.for.body:
14357 // CHECK11-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
14358 // CHECK11-NEXT:    [[MUL:%.*]] = mul i64 [[TMP7]], 400
14359 // CHECK11-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
14360 // CHECK11-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19
14361 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
14362 // CHECK11-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP8]] to double
14363 // CHECK11-NEXT:    [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
14364 // CHECK11-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
14365 // CHECK11-NEXT:    store double [[ADD2]], double* [[A]], align 4, !llvm.access.group !19
14366 // CHECK11-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
14367 // CHECK11-NEXT:    [[TMP9:%.*]] = load double, double* [[A3]], align 4, !llvm.access.group !19
14368 // CHECK11-NEXT:    [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00
14369 // CHECK11-NEXT:    store double [[INC]], double* [[A3]], align 4, !llvm.access.group !19
14370 // CHECK11-NEXT:    [[CONV4:%.*]] = fptosi double [[INC]] to i16
14371 // CHECK11-NEXT:    [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]]
14372 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP10]]
14373 // CHECK11-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
14374 // CHECK11-NEXT:    store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !19
14375 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14376 // CHECK11:       omp.body.continue:
14377 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14378 // CHECK11:       omp.inner.for.inc:
14379 // CHECK11-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
14380 // CHECK11-NEXT:    [[ADD6:%.*]] = add i64 [[TMP11]], 1
14381 // CHECK11-NEXT:    store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
14382 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
14383 // CHECK11:       omp.inner.for.end:
14384 // CHECK11-NEXT:    store i64 400, i64* [[IT]], align 8
14385 // CHECK11-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
14386 // CHECK11-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
14387 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i32 1
14388 // CHECK11-NEXT:    [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2
14389 // CHECK11-NEXT:    [[CONV9:%.*]] = sext i16 [[TMP13]] to i32
14390 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[B]], align 4
14391 // CHECK11-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP14]]
14392 // CHECK11-NEXT:    [[TMP15:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
14393 // CHECK11-NEXT:    call void @llvm.stackrestore(i8* [[TMP15]])
14394 // CHECK11-NEXT:    ret i32 [[ADD10]]
14395 //
14396 //
14397 // CHECK11-LABEL: define {{[^@]+}}@_ZL7fstatici
14398 // CHECK11-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
14399 // CHECK11-NEXT:  entry:
14400 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14401 // CHECK11-NEXT:    [[A:%.*]] = alloca i32, align 4
14402 // CHECK11-NEXT:    [[AA:%.*]] = alloca i16, align 2
14403 // CHECK11-NEXT:    [[AAA:%.*]] = alloca i8, align 1
14404 // CHECK11-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
14405 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14406 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14407 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14408 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14409 // CHECK11-NEXT:    store i32 0, i32* [[A]], align 4
14410 // CHECK11-NEXT:    store i16 0, i16* [[AA]], align 2
14411 // CHECK11-NEXT:    store i8 0, i8* [[AAA]], align 1
14412 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14413 // CHECK11-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
14414 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
14415 // CHECK11-NEXT:    ret i32 [[TMP0]]
14416 //
14417 //
14418 // CHECK11-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
14419 // CHECK11-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
14420 // CHECK11-NEXT:  entry:
14421 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14422 // CHECK11-NEXT:    [[A:%.*]] = alloca i32, align 4
14423 // CHECK11-NEXT:    [[AA:%.*]] = alloca i16, align 2
14424 // CHECK11-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
14425 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i64, align 4
14426 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
14427 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
14428 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
14429 // CHECK11-NEXT:    [[I:%.*]] = alloca i64, align 8
14430 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14431 // CHECK11-NEXT:    store i32 0, i32* [[A]], align 4
14432 // CHECK11-NEXT:    store i16 0, i16* [[AA]], align 2
14433 // CHECK11-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
14434 // CHECK11-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
14435 // CHECK11-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
14436 // CHECK11-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
14437 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14438 // CHECK11:       omp.inner.for.cond:
14439 // CHECK11-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
14440 // CHECK11-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !22
14441 // CHECK11-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
14442 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14443 // CHECK11:       omp.inner.for.body:
14444 // CHECK11-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
14445 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
14446 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
14447 // CHECK11-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !22
14448 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !22
14449 // CHECK11-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
14450 // CHECK11-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !22
14451 // CHECK11-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !22
14452 // CHECK11-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
14453 // CHECK11-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
14454 // CHECK11-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
14455 // CHECK11-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !22
14456 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
14457 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
14458 // CHECK11-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
14459 // CHECK11-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
14460 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14461 // CHECK11:       omp.body.continue:
14462 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14463 // CHECK11:       omp.inner.for.inc:
14464 // CHECK11-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
14465 // CHECK11-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
14466 // CHECK11-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
14467 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
14468 // CHECK11:       omp.inner.for.end:
14469 // CHECK11-NEXT:    store i64 11, i64* [[I]], align 8
14470 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
14471 // CHECK11-NEXT:    ret i32 [[TMP8]]
14472 //
14473 //
14474 // CHECK12-LABEL: define {{[^@]+}}@_Z7get_valv
14475 // CHECK12-SAME: () #[[ATTR0:[0-9]+]] {
14476 // CHECK12-NEXT:  entry:
14477 // CHECK12-NEXT:    ret i64 0
14478 //
14479 //
14480 // CHECK12-LABEL: define {{[^@]+}}@_Z3fooi
14481 // CHECK12-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
14482 // CHECK12-NEXT:  entry:
14483 // CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
14484 // CHECK12-NEXT:    [[A:%.*]] = alloca i32, align 4
14485 // CHECK12-NEXT:    [[AA:%.*]] = alloca i16, align 2
14486 // CHECK12-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
14487 // CHECK12-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
14488 // CHECK12-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
14489 // CHECK12-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
14490 // CHECK12-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
14491 // CHECK12-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
14492 // CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14493 // CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14494 // CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14495 // CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14496 // CHECK12-NEXT:    [[I:%.*]] = alloca i32, align 4
14497 // CHECK12-NEXT:    [[K:%.*]] = alloca i64, align 8
14498 // CHECK12-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
14499 // CHECK12-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
14500 // CHECK12-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
14501 // CHECK12-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
14502 // CHECK12-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
14503 // CHECK12-NEXT:    [[I7:%.*]] = alloca i32, align 4
14504 // CHECK12-NEXT:    [[K8:%.*]] = alloca i64, align 8
14505 // CHECK12-NEXT:    [[LIN:%.*]] = alloca i32, align 4
14506 // CHECK12-NEXT:    [[_TMP20:%.*]] = alloca i64, align 4
14507 // CHECK12-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
14508 // CHECK12-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
14509 // CHECK12-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
14510 // CHECK12-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
14511 // CHECK12-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
14512 // CHECK12-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
14513 // CHECK12-NEXT:    [[IT:%.*]] = alloca i64, align 8
14514 // CHECK12-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
14515 // CHECK12-NEXT:    [[A28:%.*]] = alloca i32, align 4
14516 // CHECK12-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
14517 // CHECK12-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
14518 // CHECK12-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
14519 // CHECK12-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
14520 // CHECK12-NEXT:    [[IT53:%.*]] = alloca i16, align 2
14521 // CHECK12-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14522 // CHECK12-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
14523 // CHECK12-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
14524 // CHECK12-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
14525 // CHECK12-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
14526 // CHECK12-NEXT:    [[IT72:%.*]] = alloca i8, align 1
14527 // CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
14528 // CHECK12-NEXT:    store i32 0, i32* [[A]], align 4
14529 // CHECK12-NEXT:    store i16 0, i16* [[AA]], align 2
14530 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
14531 // CHECK12-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
14532 // CHECK12-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
14533 // CHECK12-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
14534 // CHECK12-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
14535 // CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
14536 // CHECK12-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
14537 // CHECK12-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
14538 // CHECK12-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
14539 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14540 // CHECK12-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
14541 // CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14542 // CHECK12-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
14543 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14544 // CHECK12:       omp.inner.for.cond:
14545 // CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
14546 // CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
14547 // CHECK12-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
14548 // CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14549 // CHECK12:       omp.inner.for.body:
14550 // CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
14551 // CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
14552 // CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
14553 // CHECK12-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
14554 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14555 // CHECK12:       omp.body.continue:
14556 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14557 // CHECK12:       omp.inner.for.inc:
14558 // CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
14559 // CHECK12-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
14560 // CHECK12-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
14561 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
14562 // CHECK12:       omp.inner.for.end:
14563 // CHECK12-NEXT:    store i32 33, i32* [[I]], align 4
14564 // CHECK12-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
14565 // CHECK12-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
14566 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
14567 // CHECK12-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
14568 // CHECK12-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
14569 // CHECK12-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4
14570 // CHECK12-NEXT:    [[TMP10:%.*]] = load i64, i64* [[K]], align 8
14571 // CHECK12-NEXT:    store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8
14572 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
14573 // CHECK12:       omp.inner.for.cond9:
14574 // CHECK12-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
14575 // CHECK12-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7
14576 // CHECK12-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
14577 // CHECK12-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
14578 // CHECK12:       omp.inner.for.body11:
14579 // CHECK12-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
14580 // CHECK12-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1
14581 // CHECK12-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
14582 // CHECK12-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7
14583 // CHECK12-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7
14584 // CHECK12-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
14585 // CHECK12-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3
14586 // CHECK12-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
14587 // CHECK12-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]]
14588 // CHECK12-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7
14589 // CHECK12-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7
14590 // CHECK12-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
14591 // CHECK12-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7
14592 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
14593 // CHECK12:       omp.body.continue16:
14594 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
14595 // CHECK12:       omp.inner.for.inc17:
14596 // CHECK12-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
14597 // CHECK12-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
14598 // CHECK12-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
14599 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]]
14600 // CHECK12:       omp.inner.for.end19:
14601 // CHECK12-NEXT:    store i32 1, i32* [[I7]], align 4
14602 // CHECK12-NEXT:    [[TMP18:%.*]] = load i64, i64* [[K8]], align 8
14603 // CHECK12-NEXT:    store i64 [[TMP18]], i64* [[K]], align 8
14604 // CHECK12-NEXT:    store i32 12, i32* [[LIN]], align 4
14605 // CHECK12-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
14606 // CHECK12-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
14607 // CHECK12-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
14608 // CHECK12-NEXT:    store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8
14609 // CHECK12-NEXT:    [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4
14610 // CHECK12-NEXT:    store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4
14611 // CHECK12-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
14612 // CHECK12-NEXT:    store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4
14613 // CHECK12-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
14614 // CHECK12-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
14615 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
14616 // CHECK12:       omp.inner.for.cond29:
14617 // CHECK12-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
14618 // CHECK12-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10
14619 // CHECK12-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
14620 // CHECK12-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
14621 // CHECK12:       omp.inner.for.body31:
14622 // CHECK12-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
14623 // CHECK12-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP24]], 400
14624 // CHECK12-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
14625 // CHECK12-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10
14626 // CHECK12-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10
14627 // CHECK12-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP25]] to i64
14628 // CHECK12-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
14629 // CHECK12-NEXT:    [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
14630 // CHECK12-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]]
14631 // CHECK12-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
14632 // CHECK12-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
14633 // CHECK12-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10
14634 // CHECK12-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10
14635 // CHECK12-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP28]] to i64
14636 // CHECK12-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
14637 // CHECK12-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
14638 // CHECK12-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]]
14639 // CHECK12-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
14640 // CHECK12-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
14641 // CHECK12-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10
14642 // CHECK12-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
14643 // CHECK12-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP31]] to i32
14644 // CHECK12-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
14645 // CHECK12-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
14646 // CHECK12-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10
14647 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
14648 // CHECK12:       omp.body.continue45:
14649 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
14650 // CHECK12:       omp.inner.for.inc46:
14651 // CHECK12-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
14652 // CHECK12-NEXT:    [[ADD47:%.*]] = add i64 [[TMP32]], 1
14653 // CHECK12-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
14654 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]]
14655 // CHECK12:       omp.inner.for.end48:
14656 // CHECK12-NEXT:    store i64 400, i64* [[IT]], align 8
14657 // CHECK12-NEXT:    [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4
14658 // CHECK12-NEXT:    store i32 [[TMP33]], i32* [[LIN]], align 4
14659 // CHECK12-NEXT:    [[TMP34:%.*]] = load i32, i32* [[A28]], align 4
14660 // CHECK12-NEXT:    store i32 [[TMP34]], i32* [[A]], align 4
14661 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
14662 // CHECK12-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
14663 // CHECK12-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
14664 // CHECK12-NEXT:    store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4
14665 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
14666 // CHECK12:       omp.inner.for.cond54:
14667 // CHECK12-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
14668 // CHECK12-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13
14669 // CHECK12-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]]
14670 // CHECK12-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
14671 // CHECK12:       omp.inner.for.body56:
14672 // CHECK12-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
14673 // CHECK12-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4
14674 // CHECK12-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
14675 // CHECK12-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
14676 // CHECK12-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13
14677 // CHECK12-NEXT:    [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
14678 // CHECK12-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1
14679 // CHECK12-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13
14680 // CHECK12-NEXT:    [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
14681 // CHECK12-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP40]] to i32
14682 // CHECK12-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
14683 // CHECK12-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
14684 // CHECK12-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13
14685 // CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
14686 // CHECK12:       omp.body.continue64:
14687 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
14688 // CHECK12:       omp.inner.for.inc65:
14689 // CHECK12-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
14690 // CHECK12-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1
14691 // CHECK12-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
14692 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]]
14693 // CHECK12:       omp.inner.for.end67:
14694 // CHECK12-NEXT:    store i16 22, i16* [[IT53]], align 2
14695 // CHECK12-NEXT:    [[TMP42:%.*]] = load i32, i32* [[A]], align 4
14696 // CHECK12-NEXT:    store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4
14697 // CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
14698 // CHECK12-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
14699 // CHECK12-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
14700 // CHECK12-NEXT:    store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4
14701 // CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
14702 // CHECK12:       omp.inner.for.cond73:
14703 // CHECK12-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
14704 // CHECK12-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16
14705 // CHECK12-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]]
14706 // CHECK12-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
14707 // CHECK12:       omp.inner.for.body75:
14708 // CHECK12-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
14709 // CHECK12-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1
14710 // CHECK12-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
14711 // CHECK12-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
14712 // CHECK12-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16
14713 // CHECK12-NEXT:    [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
14714 // CHECK12-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1
14715 // CHECK12-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16
14716 // CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
14717 // CHECK12-NEXT:    [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
14718 // CHECK12-NEXT:    [[CONV80:%.*]] = fpext float [[TMP48]] to double
14719 // CHECK12-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
14720 // CHECK12-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
14721 // CHECK12-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
14722 // CHECK12-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
14723 // CHECK12-NEXT:    [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
14724 // CHECK12-NEXT:    [[CONV84:%.*]] = fpext float [[TMP49]] to double
14725 // CHECK12-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
14726 // CHECK12-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
14727 // CHECK12-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
// CHECK12-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
// CHECK12-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2
// CHECK12-NEXT:    [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
// CHECK12-NEXT:    [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00
// CHECK12-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
// CHECK12-NEXT:    [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK12-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]]
// CHECK12-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3
// CHECK12-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
// CHECK12-NEXT:    [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK12-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
// CHECK12-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK12-NEXT:    [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
// CHECK12-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1
// CHECK12-NEXT:    store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16
// CHECK12-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK12-NEXT:    [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
// CHECK12-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP54]] to i32
// CHECK12-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK12-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK12-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16
// CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK12:       omp.body.continue97:
// CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK12:       omp.inner.for.inc98:
// CHECK12-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK12-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1
// CHECK12-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK12:       omp.inner.for.end100:
// CHECK12-NEXT:    store i8 96, i8* [[IT72]], align 1
// CHECK12-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A]], align 4
// CHECK12-NEXT:    [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK12-NEXT:    call void @llvm.stackrestore(i8* [[TMP57]])
// CHECK12-NEXT:    ret i32 [[TMP56]]
//
//
// CHECK12-LABEL: define {{[^@]+}}@_Z3bari
// CHECK12-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT:  entry:
// CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK12-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK12-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK12-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
// CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK12-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
// CHECK12-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK12-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]])
// CHECK12-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK12-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK12-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
// CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK12-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
// CHECK12-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK12-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK12-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
// CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK12-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
// CHECK12-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK12-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK12-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
// CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK12-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK12-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK12-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK12-NEXT:  entry:
// CHECK12-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT:    [[B:%.*]] = alloca i32, align 4
// CHECK12-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK12-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK12-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK12-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK12-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK12-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK12-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK12-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
// CHECK12-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK12-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK12-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK12-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK12-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK12-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK12-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK12-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK12-NEXT:    store i64 [[TMP4]], i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK12:       omp.inner.for.cond:
// CHECK12-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK12-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19
// CHECK12-NEXT:    [[CMP:%.*]] = icmp ule i64 [[TMP5]], [[TMP6]]
// CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK12:       omp.inner.for.body:
// CHECK12-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK12-NEXT:    [[MUL:%.*]] = mul i64 [[TMP7]], 400
// CHECK12-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK12-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19
// CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
// CHECK12-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP8]] to double
// CHECK12-NEXT:    [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK12-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK12-NEXT:    store double [[ADD2]], double* [[A]], align 4, !llvm.access.group !19
// CHECK12-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK12-NEXT:    [[TMP9:%.*]] = load double, double* [[A3]], align 4, !llvm.access.group !19
// CHECK12-NEXT:    [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00
// CHECK12-NEXT:    store double [[INC]], double* [[A3]], align 4, !llvm.access.group !19
// CHECK12-NEXT:    [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK12-NEXT:    [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP10]]
// CHECK12-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK12-NEXT:    store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !19
// CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK12:       omp.body.continue:
// CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12:       omp.inner.for.inc:
// CHECK12-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK12-NEXT:    [[ADD6:%.*]] = add i64 [[TMP11]], 1
// CHECK12-NEXT:    store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK12:       omp.inner.for.end:
// CHECK12-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK12-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK12-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
// CHECK12-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i32 1
// CHECK12-NEXT:    [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2
// CHECK12-NEXT:    [[CONV9:%.*]] = sext i16 [[TMP13]] to i32
// CHECK12-NEXT:    [[TMP14:%.*]] = load i32, i32* [[B]], align 4
// CHECK12-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP14]]
// CHECK12-NEXT:    [[TMP15:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK12-NEXT:    call void @llvm.stackrestore(i8* [[TMP15]])
// CHECK12-NEXT:    ret i32 [[ADD10]]
//
//
// CHECK12-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK12-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT:  entry:
// CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK12-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK12-NEXT:    [[AAA:%.*]] = alloca i8, align 1
// CHECK12-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK12-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK12-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK12-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK12-NEXT:    store i8 0, i8* [[AAA]], align 1
// CHECK12-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK12-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK12-NEXT:    ret i32 [[TMP0]]
//
//
// CHECK12-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK12-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK12-NEXT:  entry:
// CHECK12-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK12-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK12-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK12-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK12-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK12-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK12-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK12-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK12-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK12-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK12-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK12-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK12-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK12-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK12:       omp.inner.for.cond:
// CHECK12-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
// CHECK12-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !22
// CHECK12-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK12-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK12:       omp.inner.for.body:
// CHECK12-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
// CHECK12-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK12-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK12-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !22
// CHECK12-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !22
// CHECK12-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK12-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !22
// CHECK12-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !22
// CHECK12-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK12-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK12-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK12-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !22
// CHECK12-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK12-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
// CHECK12-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK12-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
// CHECK12-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK12:       omp.body.continue:
// CHECK12-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12:       omp.inner.for.inc:
// CHECK12-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
// CHECK12-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK12-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
// CHECK12-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK12:       omp.inner.for.end:
// CHECK12-NEXT:    store i64 11, i64* [[I]], align 8
// CHECK12-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK12-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK13-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK13-NEXT:  entry:
// CHECK13-NEXT:    ret i64 0
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK13-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK13-NEXT:  entry:
// CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK13-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
// CHECK13-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK13-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK13-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[K:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[I7:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[K8:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[LIN:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[_TMP20:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[A28:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
// CHECK13-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[IT53:%.*]] = alloca i16, align 2
// CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
// CHECK13-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[IT72:%.*]] = alloca i8, align 1
// CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK13-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK13-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK13-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK13-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK13-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK13-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK13-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK13-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13:       omp.inner.for.cond:
// CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
// CHECK13-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13:       omp.inner.for.body:
// CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5
// CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK13-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
// CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13:       omp.body.continue:
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13:       omp.inner.for.inc:
// CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK13-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK13-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK13:       omp.inner.for.end:
// CHECK13-NEXT:    store i32 33, i32* [[I]], align 4
// CHECK13-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK13-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
// CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
// CHECK13-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
// CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
// CHECK13-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4
// CHECK13-NEXT:    [[TMP12:%.*]] = load i64, i64* [[K]], align 8
// CHECK13-NEXT:    store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
// CHECK13:       omp.inner.for.cond9:
// CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK13-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6
// CHECK13-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK13-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
// CHECK13:       omp.inner.for.body11:
// CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK13-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK13-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6
// CHECK13-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6
// CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK13-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
// CHECK13-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK13-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
// CHECK13-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6
// CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6
// CHECK13-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK13-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6
// CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK13:       omp.body.continue16:
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK13:       omp.inner.for.inc17:
// CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK13-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK13-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK13:       omp.inner.for.end19:
// CHECK13-NEXT:    store i32 1, i32* [[I7]], align 4
// CHECK13-NEXT:    [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
// CHECK13-NEXT:    store i64 [[TMP20]], i64* [[K]], align 8
// CHECK13-NEXT:    store i32 12, i32* [[LIN]], align 4
// CHECK13-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK13-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK13-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK13-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
// CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK13-NEXT:    store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
// CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT:    store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
// CHECK13-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK13-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK13:       omp.inner.for.cond29:
// CHECK13-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK13-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9
// CHECK13-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
// CHECK13-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK13:       omp.inner.for.body31:
// CHECK13-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK13-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP26]], 400
// CHECK13-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK13-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9
// CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9
// CHECK13-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
// CHECK13-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK13-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK13-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
// CHECK13-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK13-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK13-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9
// CHECK13-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9
// CHECK13-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
// CHECK13-NEXT:    [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK13-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK13-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
// CHECK13-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK13-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK13-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9
// CHECK13-NEXT:    [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
// CHECK13-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
// CHECK13-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK13-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK13-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9
// CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK13:       omp.body.continue45:
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK13:       omp.inner.for.inc46:
// CHECK13-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK13-NEXT:    [[ADD47:%.*]] = add i64 [[TMP34]], 1
// CHECK13-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK13:       omp.inner.for.end48:
// CHECK13-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK13-NEXT:    [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK13-NEXT:    store i32 [[TMP35]], i32* [[LIN]], align 4
// CHECK13-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
// CHECK13-NEXT:    store i32 [[TMP36]], i32* [[A]], align 4
// CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK13-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK13-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK13-NEXT:    store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK13:       omp.inner.for.cond54:
// CHECK13-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK13-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12
// CHECK13-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
// CHECK13-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK13:       omp.inner.for.body56:
// CHECK13-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK13-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
// CHECK13-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK13-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK13-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12
// CHECK13-NEXT:    [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
// CHECK13-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK13-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12
// CHECK13-NEXT:    [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
// CHECK13-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
// CHECK13-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK13-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK13-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12
// CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK13:       omp.body.continue64:
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK13:       omp.inner.for.inc65:
// CHECK13-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK13-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
// CHECK13-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK13:       omp.inner.for.end67:
// CHECK13-NEXT:    store i16 22, i16* [[IT53]], align 2
// CHECK13-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK13-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK13-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK13-NEXT:    store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK13:       omp.inner.for.cond73:
// CHECK13-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK13-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15
// CHECK13-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
// CHECK13-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK13:       omp.inner.for.body75:
// CHECK13-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK13-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
// CHECK13-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK13-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK13-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15
// CHECK13-NEXT:    [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
// CHECK13-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
// CHECK13-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15
// CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK13-NEXT:    [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK13-NEXT:    [[CONV80:%.*]] = fpext float [[TMP50]] to double
// CHECK13-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK13-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK13-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK13-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK13-NEXT:    [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK13-NEXT:    [[CONV84:%.*]] = fpext float [[TMP51]] to double
// CHECK13-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
// CHECK13-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
// CHECK13-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK13-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK13-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2
// CHECK13-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK13-NEXT:    [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK13-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK13-NEXT:    [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK13-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]]
// CHECK13-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3
// CHECK13-NEXT:    [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK13-NEXT:    [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00
// CHECK13-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK13-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK13-NEXT:    [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
// CHECK13-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1
// CHECK13-NEXT:    store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15
// CHECK13-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK13-NEXT:    [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
// CHECK13-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP56]] to i32
// CHECK13-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK13-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK13-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15
// CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK13:       omp.body.continue97:
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK13:       omp.inner.for.inc98:
// CHECK13-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK13-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1
// CHECK13-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK13:       omp.inner.for.end100:
// CHECK13-NEXT:    store i8 96, i8* [[IT72]], align 1
// CHECK13-NEXT:    [[TMP58:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT:    [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK13-NEXT:    call void @llvm.stackrestore(i8* [[TMP59]])
// CHECK13-NEXT:    ret i32 [[TMP58]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z3bari
// CHECK13-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK13-NEXT:  entry:
// CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
// CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK13-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
// CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]])
// CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK13-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
// CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
// CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK13-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
// CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
// CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK13-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
// CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK13-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK13-NEXT:  entry:
// CHECK13-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[B:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK13-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK13-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK13-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
// CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK13-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK13-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK13-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK13-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK13-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
// CHECK13-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
// CHECK13-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK13-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK13-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK13-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK13-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK13-NEXT:    [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK13-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK13-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK13:       omp_if.then:
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13:       omp.inner.for.cond:
// CHECK13-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK13-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
// CHECK13-NEXT:    [[CMP2:%.*]] = icmp ule i64 [[TMP8]], [[TMP9]]
// CHECK13-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13:       omp.inner.for.body:
// CHECK13-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK13-NEXT:    [[MUL:%.*]] = mul i64 [[TMP10]], 400
// CHECK13-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK13-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
// CHECK13-NEXT:    [[TMP11:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
// CHECK13-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP11]] to double
// CHECK13-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK13-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK13-NEXT:    store double [[ADD3]], double* [[A]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK13-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK13-NEXT:    [[TMP12:%.*]] = load double, double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK13-NEXT:    [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00
// CHECK13-NEXT:    store double [[INC]], double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK13-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK13-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
// CHECK13-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK13-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !18
// CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13:       omp.body.continue:
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13:       omp.inner.for.inc:
// CHECK13-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK13-NEXT:    [[ADD7:%.*]] = add i64 [[TMP14]], 1
// CHECK13-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK13:       omp.inner.for.end:
// CHECK13-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK13:       omp_if.else:
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND8:%.*]]
// CHECK13:       omp.inner.for.cond8:
// CHECK13-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK13-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK13-NEXT:    [[CMP9:%.*]] = icmp ule i64 [[TMP15]], [[TMP16]]
// CHECK13-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]]
// CHECK13:       omp.inner.for.body10:
// CHECK13-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK13-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP17]], 400
// CHECK13-NEXT:    [[SUB12:%.*]] = sub i64 2000, [[MUL11]]
// CHECK13-NEXT:    store i64 [[SUB12]], i64* [[IT]], align 8
// CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[B]], align 4
// CHECK13-NEXT:    [[CONV13:%.*]] = sitofp i32 [[TMP18]] to double
// CHECK13-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00
// CHECK13-NEXT:    [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK13-NEXT:    store double [[ADD14]], double* [[A15]], align 8
// CHECK13-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK13-NEXT:    [[TMP19:%.*]] = load double, double* [[A16]], align 8
// CHECK13-NEXT:    [[INC17:%.*]] = fadd double [[TMP19]], 1.000000e+00
// CHECK13-NEXT:    store double [[INC17]], double* [[A16]], align 8
// CHECK13-NEXT:    [[CONV18:%.*]] = fptosi double [[INC17]] to i16
// CHECK13-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK13-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP20]]
// CHECK13-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i64 1
// CHECK13-NEXT:    store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2
// CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE21:%.*]]
// CHECK13:       omp.body.continue21:
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC22:%.*]]
// CHECK13:       omp.inner.for.inc22:
// CHECK13-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK13-NEXT:    [[ADD23:%.*]] = add i64 [[TMP21]], 1
// CHECK13-NEXT:    store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK13:       omp.inner.for.end24:
// CHECK13-NEXT:    br label [[OMP_IF_END]]
// CHECK13:       omp_if.end:
// CHECK13-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK13-NEXT:    [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK13-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP22]]
// CHECK13-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i64 1
// CHECK13-NEXT:    [[TMP23:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
// CHECK13-NEXT:    [[CONV27:%.*]] = sext i16 [[TMP23]] to i32
// CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[B]], align 4
// CHECK13-NEXT:    [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP24]]
// CHECK13-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK13-NEXT:    call void @llvm.stackrestore(i8* [[TMP25]])
// CHECK13-NEXT:    ret i32 [[ADD28]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK13-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK13-NEXT:  entry:
// CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK13-NEXT:    [[AAA:%.*]] = alloca i8, align 1
// CHECK13-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK13-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK13-NEXT:    store i8 0, i8* [[AAA]], align 1
// CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT:    ret i32 [[TMP0]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK13-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK13-NEXT:  entry:
// CHECK13-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK13-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK13-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK13-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK13-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK13-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK13-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK13-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK13-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13:       omp.inner.for.cond:
// CHECK13-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK13-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !24
// CHECK13-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK13-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13:       omp.inner.for.body:
// CHECK13-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK13-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK13-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !24
// CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24
// CHECK13-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK13-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !24
// CHECK13-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24
// CHECK13-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK13-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK13-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK13-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !24
// CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK13-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK13-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13:       omp.body.continue:
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13:       omp.inner.for.inc:
// CHECK13-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK13-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK13-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK13:       omp.inner.for.end:
// CHECK13-NEXT:    store i64 11, i64* [[I]], align 8
// CHECK13-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK14-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK14-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK14-NEXT:  entry:
// CHECK14-NEXT:    ret i64 0
//
//
// CHECK14-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK14-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK14-NEXT:  entry:
// CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK14-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
// CHECK14-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK14-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK14-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK14-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK14-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[K:%.*]] = alloca i64, align 8
// CHECK14-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK14-NEXT:    [[I7:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[K8:%.*]] = alloca i64, align 8
// CHECK14-NEXT:    [[LIN:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[_TMP20:%.*]] = alloca i64, align 8
// CHECK14-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
// CHECK14-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
// CHECK14-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
// CHECK14-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK14-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK14-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[A28:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
// CHECK14-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[IT53:%.*]] = alloca i16, align 2
// CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
// CHECK14-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
// CHECK14-NEXT:    [[IT72:%.*]] = alloca i8, align 1
// CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK14-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK14-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK14-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK14-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK14-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK14-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK14-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK14-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK14-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK14-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14:       omp.inner.for.cond:
// CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
// CHECK14-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14:       omp.inner.for.body:
// CHECK14-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5
// CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK14-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
// CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14:       omp.body.continue:
// CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14:       omp.inner.for.inc:
// CHECK14-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK14-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK14-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK14:       omp.inner.for.end:
// CHECK14-NEXT:    store i32 33, i32* [[I]], align 4
// CHECK14-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK14-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
// CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
// CHECK14-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
// CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
// CHECK14-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4
// CHECK14-NEXT:    [[TMP12:%.*]] = load i64, i64* [[K]], align 8
// CHECK14-NEXT:    store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8
// CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
// CHECK14:       omp.inner.for.cond9:
// CHECK14-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK14-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6
// CHECK14-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK14-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
// CHECK14:       omp.inner.for.body11:
// CHECK14-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK14-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK14-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK14-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6
// CHECK14-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6
// CHECK14-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK14-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
// CHECK14-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK14-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
// CHECK14-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6
// CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6
// CHECK14-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK14-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6
// CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK14:       omp.body.continue16:
// CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK14:       omp.inner.for.inc17:
// CHECK14-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK14-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK14-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK14:       omp.inner.for.end19:
// CHECK14-NEXT:    store i32 1, i32* [[I7]], align 4
// CHECK14-NEXT:    [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
// CHECK14-NEXT:    store i64 [[TMP20]], i64* [[K]], align 8
// CHECK14-NEXT:    store i32 12, i32* [[LIN]], align 4
// CHECK14-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK14-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK14-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK14-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
// CHECK14-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK14-NEXT:    store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
// CHECK14-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT:    store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
// CHECK14-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK14-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK14:       omp.inner.for.cond29:
// CHECK14-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
15622 // CHECK14-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9
15623 // CHECK14-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
15624 // CHECK14-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
15625 // CHECK14:       omp.inner.for.body31:
15626 // CHECK14-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
15627 // CHECK14-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP26]], 400
15628 // CHECK14-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
15629 // CHECK14-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9
15630 // CHECK14-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9
15631 // CHECK14-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
15632 // CHECK14-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
15633 // CHECK14-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
15634 // CHECK14-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
15635 // CHECK14-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
15636 // CHECK14-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
15637 // CHECK14-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9
15638 // CHECK14-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9
15639 // CHECK14-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
15640 // CHECK14-NEXT:    [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
15641 // CHECK14-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
15642 // CHECK14-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
15643 // CHECK14-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
15644 // CHECK14-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
15645 // CHECK14-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9
15646 // CHECK14-NEXT:    [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
15647 // CHECK14-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
15648 // CHECK14-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
15649 // CHECK14-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
15650 // CHECK14-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9
15651 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
15652 // CHECK14:       omp.body.continue45:
15653 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
15654 // CHECK14:       omp.inner.for.inc46:
15655 // CHECK14-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
15656 // CHECK14-NEXT:    [[ADD47:%.*]] = add i64 [[TMP34]], 1
15657 // CHECK14-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
15658 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
15659 // CHECK14:       omp.inner.for.end48:
15660 // CHECK14-NEXT:    store i64 400, i64* [[IT]], align 8
15661 // CHECK14-NEXT:    [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
15662 // CHECK14-NEXT:    store i32 [[TMP35]], i32* [[LIN]], align 4
15663 // CHECK14-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
15664 // CHECK14-NEXT:    store i32 [[TMP36]], i32* [[A]], align 4
15665 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
15666 // CHECK14-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
15667 // CHECK14-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
15668 // CHECK14-NEXT:    store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
15669 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
15670 // CHECK14:       omp.inner.for.cond54:
15671 // CHECK14-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
15672 // CHECK14-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12
15673 // CHECK14-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
15674 // CHECK14-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
15675 // CHECK14:       omp.inner.for.body56:
15676 // CHECK14-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
15677 // CHECK14-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
15678 // CHECK14-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
15679 // CHECK14-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
15680 // CHECK14-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12
15681 // CHECK14-NEXT:    [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
15682 // CHECK14-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
15683 // CHECK14-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12
15684 // CHECK14-NEXT:    [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
15685 // CHECK14-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
15686 // CHECK14-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
15687 // CHECK14-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
15688 // CHECK14-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12
15689 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
15690 // CHECK14:       omp.body.continue64:
15691 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
15692 // CHECK14:       omp.inner.for.inc65:
15693 // CHECK14-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
15694 // CHECK14-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
15695 // CHECK14-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
15696 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
15697 // CHECK14:       omp.inner.for.end67:
15698 // CHECK14-NEXT:    store i16 22, i16* [[IT53]], align 2
15699 // CHECK14-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
15700 // CHECK14-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
15701 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
15702 // CHECK14-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
15703 // CHECK14-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
15704 // CHECK14-NEXT:    store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
15705 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
15706 // CHECK14:       omp.inner.for.cond73:
15707 // CHECK14-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
15708 // CHECK14-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15
15709 // CHECK14-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
15710 // CHECK14-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
15711 // CHECK14:       omp.inner.for.body75:
15712 // CHECK14-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
15713 // CHECK14-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
15714 // CHECK14-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
15715 // CHECK14-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
15716 // CHECK14-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15
15717 // CHECK14-NEXT:    [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
15718 // CHECK14-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
15719 // CHECK14-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15
15720 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
15721 // CHECK14-NEXT:    [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
15722 // CHECK14-NEXT:    [[CONV80:%.*]] = fpext float [[TMP50]] to double
15723 // CHECK14-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
15724 // CHECK14-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
15725 // CHECK14-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
15726 // CHECK14-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
15727 // CHECK14-NEXT:    [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
15728 // CHECK14-NEXT:    [[CONV84:%.*]] = fpext float [[TMP51]] to double
15729 // CHECK14-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
15730 // CHECK14-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
15731 // CHECK14-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
15732 // CHECK14-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
15733 // CHECK14-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2
15734 // CHECK14-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
15735 // CHECK14-NEXT:    [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00
15736 // CHECK14-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
15737 // CHECK14-NEXT:    [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]]
15738 // CHECK14-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]]
15739 // CHECK14-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3
15740 // CHECK14-NEXT:    [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
15741 // CHECK14-NEXT:    [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00
15742 // CHECK14-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
15743 // CHECK14-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
15744 // CHECK14-NEXT:    [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
15745 // CHECK14-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1
15746 // CHECK14-NEXT:    store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15
15747 // CHECK14-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
15748 // CHECK14-NEXT:    [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
15749 // CHECK14-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP56]] to i32
15750 // CHECK14-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
15751 // CHECK14-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
15752 // CHECK14-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15
15753 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
15754 // CHECK14:       omp.body.continue97:
15755 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
15756 // CHECK14:       omp.inner.for.inc98:
15757 // CHECK14-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
15758 // CHECK14-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1
15759 // CHECK14-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
15760 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]]
15761 // CHECK14:       omp.inner.for.end100:
15762 // CHECK14-NEXT:    store i8 96, i8* [[IT72]], align 1
15763 // CHECK14-NEXT:    [[TMP58:%.*]] = load i32, i32* [[A]], align 4
15764 // CHECK14-NEXT:    [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
15765 // CHECK14-NEXT:    call void @llvm.stackrestore(i8* [[TMP59]])
15766 // CHECK14-NEXT:    ret i32 [[TMP58]]
15767 //
15768 //
15769 // CHECK14-LABEL: define {{[^@]+}}@_Z3bari
15770 // CHECK14-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
15771 // CHECK14-NEXT:  entry:
15772 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15773 // CHECK14-NEXT:    [[A:%.*]] = alloca i32, align 4
15774 // CHECK14-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
15775 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15776 // CHECK14-NEXT:    store i32 0, i32* [[A]], align 4
15777 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
15778 // CHECK14-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
15779 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
15780 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
15781 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
15782 // CHECK14-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
15783 // CHECK14-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]])
15784 // CHECK14-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
15785 // CHECK14-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
15786 // CHECK14-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
15787 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
15788 // CHECK14-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
15789 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
15790 // CHECK14-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
15791 // CHECK14-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
15792 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
15793 // CHECK14-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
15794 // CHECK14-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
15795 // CHECK14-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
15796 // CHECK14-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
15797 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
15798 // CHECK14-NEXT:    ret i32 [[TMP8]]
15799 //
15800 //
15801 // CHECK14-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
15802 // CHECK14-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
15803 // CHECK14-NEXT:  entry:
15804 // CHECK14-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
15805 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15806 // CHECK14-NEXT:    [[B:%.*]] = alloca i32, align 4
15807 // CHECK14-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
15808 // CHECK14-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
15809 // CHECK14-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
15810 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i64, align 8
15811 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
15812 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
15813 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
15814 // CHECK14-NEXT:    [[IT:%.*]] = alloca i64, align 8
15815 // CHECK14-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
15816 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15817 // CHECK14-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
15818 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
15819 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
15820 // CHECK14-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
15821 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
15822 // CHECK14-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
15823 // CHECK14-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
15824 // CHECK14-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
15825 // CHECK14-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
15826 // CHECK14-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
15827 // CHECK14-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
15828 // CHECK14-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
15829 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
15830 // CHECK14-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
15831 // CHECK14-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
15832 // CHECK14-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
15833 // CHECK14-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
15834 // CHECK14-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
15835 // CHECK14-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
15836 // CHECK14-NEXT:    [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
15837 // CHECK14-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
15838 // CHECK14-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
15839 // CHECK14:       omp_if.then:
15840 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15841 // CHECK14:       omp.inner.for.cond:
15842 // CHECK14-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
15843 // CHECK14-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
15844 // CHECK14-NEXT:    [[CMP2:%.*]] = icmp ule i64 [[TMP8]], [[TMP9]]
15845 // CHECK14-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15846 // CHECK14:       omp.inner.for.body:
15847 // CHECK14-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
15848 // CHECK14-NEXT:    [[MUL:%.*]] = mul i64 [[TMP10]], 400
15849 // CHECK14-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
15850 // CHECK14-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
15851 // CHECK14-NEXT:    [[TMP11:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
15852 // CHECK14-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP11]] to double
15853 // CHECK14-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
15854 // CHECK14-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
15855 // CHECK14-NEXT:    store double [[ADD3]], double* [[A]], align 8, !nontemporal !19, !llvm.access.group !18
15856 // CHECK14-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
15857 // CHECK14-NEXT:    [[TMP12:%.*]] = load double, double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18
15858 // CHECK14-NEXT:    [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00
15859 // CHECK14-NEXT:    store double [[INC]], double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18
15860 // CHECK14-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
15861 // CHECK14-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
15862 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
15863 // CHECK14-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
15864 // CHECK14-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !18
15865 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15866 // CHECK14:       omp.body.continue:
15867 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15868 // CHECK14:       omp.inner.for.inc:
15869 // CHECK14-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
15870 // CHECK14-NEXT:    [[ADD7:%.*]] = add i64 [[TMP14]], 1
15871 // CHECK14-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
15872 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
15873 // CHECK14:       omp.inner.for.end:
15874 // CHECK14-NEXT:    br label [[OMP_IF_END:%.*]]
15875 // CHECK14:       omp_if.else:
15876 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND8:%.*]]
15877 // CHECK14:       omp.inner.for.cond8:
15878 // CHECK14-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
15879 // CHECK14-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
15880 // CHECK14-NEXT:    [[CMP9:%.*]] = icmp ule i64 [[TMP15]], [[TMP16]]
15881 // CHECK14-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]]
15882 // CHECK14:       omp.inner.for.body10:
15883 // CHECK14-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
15884 // CHECK14-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP17]], 400
15885 // CHECK14-NEXT:    [[SUB12:%.*]] = sub i64 2000, [[MUL11]]
15886 // CHECK14-NEXT:    store i64 [[SUB12]], i64* [[IT]], align 8
15887 // CHECK14-NEXT:    [[TMP18:%.*]] = load i32, i32* [[B]], align 4
15888 // CHECK14-NEXT:    [[CONV13:%.*]] = sitofp i32 [[TMP18]] to double
15889 // CHECK14-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00
15890 // CHECK14-NEXT:    [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
15891 // CHECK14-NEXT:    store double [[ADD14]], double* [[A15]], align 8
15892 // CHECK14-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
15893 // CHECK14-NEXT:    [[TMP19:%.*]] = load double, double* [[A16]], align 8
15894 // CHECK14-NEXT:    [[INC17:%.*]] = fadd double [[TMP19]], 1.000000e+00
15895 // CHECK14-NEXT:    store double [[INC17]], double* [[A16]], align 8
15896 // CHECK14-NEXT:    [[CONV18:%.*]] = fptosi double [[INC17]] to i16
15897 // CHECK14-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP2]]
15898 // CHECK14-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP20]]
15899 // CHECK14-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i64 1
15900 // CHECK14-NEXT:    store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2
15901 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE21:%.*]]
15902 // CHECK14:       omp.body.continue21:
15903 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC22:%.*]]
15904 // CHECK14:       omp.inner.for.inc22:
15905 // CHECK14-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
15906 // CHECK14-NEXT:    [[ADD23:%.*]] = add i64 [[TMP21]], 1
15907 // CHECK14-NEXT:    store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8
15908 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP22:![0-9]+]]
15909 // CHECK14:       omp.inner.for.end24:
15910 // CHECK14-NEXT:    br label [[OMP_IF_END]]
15911 // CHECK14:       omp_if.end:
15912 // CHECK14-NEXT:    store i64 400, i64* [[IT]], align 8
15913 // CHECK14-NEXT:    [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]]
15914 // CHECK14-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP22]]
15915 // CHECK14-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i64 1
15916 // CHECK14-NEXT:    [[TMP23:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
15917 // CHECK14-NEXT:    [[CONV27:%.*]] = sext i16 [[TMP23]] to i32
15918 // CHECK14-NEXT:    [[TMP24:%.*]] = load i32, i32* [[B]], align 4
15919 // CHECK14-NEXT:    [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP24]]
15920 // CHECK14-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
15921 // CHECK14-NEXT:    call void @llvm.stackrestore(i8* [[TMP25]])
15922 // CHECK14-NEXT:    ret i32 [[ADD28]]
15923 //
15924 //
15925 // CHECK14-LABEL: define {{[^@]+}}@_ZL7fstatici
15926 // CHECK14-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
15927 // CHECK14-NEXT:  entry:
15928 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15929 // CHECK14-NEXT:    [[A:%.*]] = alloca i32, align 4
15930 // CHECK14-NEXT:    [[AA:%.*]] = alloca i16, align 2
15931 // CHECK14-NEXT:    [[AAA:%.*]] = alloca i8, align 1
15932 // CHECK14-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
15933 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15934 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15935 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15936 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15937 // CHECK14-NEXT:    store i32 0, i32* [[A]], align 4
15938 // CHECK14-NEXT:    store i16 0, i16* [[AA]], align 2
15939 // CHECK14-NEXT:    store i8 0, i8* [[AAA]], align 1
15940 // CHECK14-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15941 // CHECK14-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
15942 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
15943 // CHECK14-NEXT:    ret i32 [[TMP0]]
15944 //
15945 //
15946 // CHECK14-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
15947 // CHECK14-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
15948 // CHECK14-NEXT:  entry:
15949 // CHECK14-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
15950 // CHECK14-NEXT:    [[A:%.*]] = alloca i32, align 4
15951 // CHECK14-NEXT:    [[AA:%.*]] = alloca i16, align 2
15952 // CHECK14-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
15953 // CHECK14-NEXT:    [[TMP:%.*]] = alloca i64, align 8
15954 // CHECK14-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
15955 // CHECK14-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
15956 // CHECK14-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
15957 // CHECK14-NEXT:    [[I:%.*]] = alloca i64, align 8
15958 // CHECK14-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
15959 // CHECK14-NEXT:    store i32 0, i32* [[A]], align 4
15960 // CHECK14-NEXT:    store i16 0, i16* [[AA]], align 2
15961 // CHECK14-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
15962 // CHECK14-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
15963 // CHECK14-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
15964 // CHECK14-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
15965 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15966 // CHECK14:       omp.inner.for.cond:
15967 // CHECK14-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
15968 // CHECK14-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !24
15969 // CHECK14-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
15970 // CHECK14-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15971 // CHECK14:       omp.inner.for.body:
15972 // CHECK14-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
15973 // CHECK14-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
15974 // CHECK14-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
15975 // CHECK14-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !24
15976 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24
15977 // CHECK14-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
15978 // CHECK14-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !24
15979 // CHECK14-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24
15980 // CHECK14-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
15981 // CHECK14-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
15982 // CHECK14-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
15983 // CHECK14-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !24
15984 // CHECK14-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
15985 // CHECK14-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
15986 // CHECK14-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
15987 // CHECK14-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
15988 // CHECK14-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15989 // CHECK14:       omp.body.continue:
15990 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15991 // CHECK14:       omp.inner.for.inc:
15992 // CHECK14-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
15993 // CHECK14-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
15994 // CHECK14-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
15995 // CHECK14-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
15996 // CHECK14:       omp.inner.for.end:
15997 // CHECK14-NEXT:    store i64 11, i64* [[I]], align 8
15998 // CHECK14-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
15999 // CHECK14-NEXT:    ret i32 [[TMP8]]
16000 //
16001 //
16002 // CHECK15-LABEL: define {{[^@]+}}@_Z7get_valv
16003 // CHECK15-SAME: () #[[ATTR0:[0-9]+]] {
16004 // CHECK15-NEXT:  entry:
16005 // CHECK15-NEXT:    ret i64 0
16006 //
16007 //
16008 // CHECK15-LABEL: define {{[^@]+}}@_Z3fooi
16009 // CHECK15-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
16010 // CHECK15-NEXT:  entry:
16011 // CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
16012 // CHECK15-NEXT:    [[A:%.*]] = alloca i32, align 4
16013 // CHECK15-NEXT:    [[AA:%.*]] = alloca i16, align 2
16014 // CHECK15-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
16015 // CHECK15-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
16016 // CHECK15-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
16017 // CHECK15-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
16018 // CHECK15-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
16019 // CHECK15-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
16020 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
16021 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
16022 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
16023 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
16024 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
16025 // CHECK15-NEXT:    [[K:%.*]] = alloca i64, align 8
16026 // CHECK15-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
16027 // CHECK15-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
16028 // CHECK15-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
16029 // CHECK15-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
16030 // CHECK15-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
16031 // CHECK15-NEXT:    [[I7:%.*]] = alloca i32, align 4
16032 // CHECK15-NEXT:    [[K8:%.*]] = alloca i64, align 8
16033 // CHECK15-NEXT:    [[LIN:%.*]] = alloca i32, align 4
16034 // CHECK15-NEXT:    [[_TMP20:%.*]] = alloca i64, align 4
16035 // CHECK15-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
16036 // CHECK15-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
16037 // CHECK15-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
16038 // CHECK15-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
16039 // CHECK15-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
16040 // CHECK15-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
16041 // CHECK15-NEXT:    [[IT:%.*]] = alloca i64, align 8
16042 // CHECK15-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
16043 // CHECK15-NEXT:    [[A28:%.*]] = alloca i32, align 4
16044 // CHECK15-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
16045 // CHECK15-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
16046 // CHECK15-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
16047 // CHECK15-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
16048 // CHECK15-NEXT:    [[IT53:%.*]] = alloca i16, align 2
16049 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16050 // CHECK15-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
16051 // CHECK15-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
16052 // CHECK15-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
16053 // CHECK15-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
16054 // CHECK15-NEXT:    [[IT72:%.*]] = alloca i8, align 1
16055 // CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
16056 // CHECK15-NEXT:    store i32 0, i32* [[A]], align 4
16057 // CHECK15-NEXT:    store i16 0, i16* [[AA]], align 2
16058 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
16059 // CHECK15-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
16060 // CHECK15-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
16061 // CHECK15-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
16062 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
16063 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
16064 // CHECK15-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
16065 // CHECK15-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
16066 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
16067 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
16068 // CHECK15-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
16069 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
16070 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
16071 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
16072 // CHECK15:       omp.inner.for.cond:
16073 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
16074 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
16075 // CHECK15-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
16076 // CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16077 // CHECK15:       omp.inner.for.body:
16078 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
16079 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
16080 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
16081 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
16082 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
16083 // CHECK15:       omp.body.continue:
16084 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
16085 // CHECK15:       omp.inner.for.inc:
16086 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
16087 // CHECK15-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
16088 // CHECK15-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
16089 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
16090 // CHECK15:       omp.inner.for.end:
16091 // CHECK15-NEXT:    store i32 33, i32* [[I]], align 4
16092 // CHECK15-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
16093 // CHECK15-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
16094 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
16095 // CHECK15-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
16096 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
16097 // CHECK15-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4
16098 // CHECK15-NEXT:    [[TMP10:%.*]] = load i64, i64* [[K]], align 8
16099 // CHECK15-NEXT:    store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8
16100 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
16101 // CHECK15:       omp.inner.for.cond9:
16102 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
16103 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7
16104 // CHECK15-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
16105 // CHECK15-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
16106 // CHECK15:       omp.inner.for.body11:
16107 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
16108 // CHECK15-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1
16109 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
16110 // CHECK15-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7
16111 // CHECK15-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7
16112 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
16113 // CHECK15-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3
16114 // CHECK15-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
16115 // CHECK15-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]]
16116 // CHECK15-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7
16117 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7
16118 // CHECK15-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
16119 // CHECK15-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7
16120 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
16121 // CHECK15:       omp.body.continue16:
16122 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
16123 // CHECK15:       omp.inner.for.inc17:
16124 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
16125 // CHECK15-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
16126 // CHECK15-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
16127 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]]
16128 // CHECK15:       omp.inner.for.end19:
16129 // CHECK15-NEXT:    store i32 1, i32* [[I7]], align 4
16130 // CHECK15-NEXT:    [[TMP18:%.*]] = load i64, i64* [[K8]], align 8
16131 // CHECK15-NEXT:    store i64 [[TMP18]], i64* [[K]], align 8
16132 // CHECK15-NEXT:    store i32 12, i32* [[LIN]], align 4
16133 // CHECK15-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
16134 // CHECK15-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
16135 // CHECK15-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
16136 // CHECK15-NEXT:    store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8
16137 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4
16138 // CHECK15-NEXT:    store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4
16139 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
16140 // CHECK15-NEXT:    store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4
16141 // CHECK15-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
16142 // CHECK15-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
16143 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
16144 // CHECK15:       omp.inner.for.cond29:
16145 // CHECK15-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
16146 // CHECK15-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10
16147 // CHECK15-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
16148 // CHECK15-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
16149 // CHECK15:       omp.inner.for.body31:
16150 // CHECK15-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
16151 // CHECK15-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP24]], 400
16152 // CHECK15-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
16153 // CHECK15-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10
16154 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10
16155 // CHECK15-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP25]] to i64
16156 // CHECK15-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
16157 // CHECK15-NEXT:    [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
16158 // CHECK15-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]]
16159 // CHECK15-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
16160 // CHECK15-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
16161 // CHECK15-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10
16162 // CHECK15-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10
16163 // CHECK15-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP28]] to i64
16164 // CHECK15-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
16165 // CHECK15-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
16166 // CHECK15-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]]
16167 // CHECK15-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
16168 // CHECK15-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
16169 // CHECK15-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10
16170 // CHECK15-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
16171 // CHECK15-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP31]] to i32
16172 // CHECK15-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
16173 // CHECK15-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
16174 // CHECK15-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10
16175 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
16176 // CHECK15:       omp.body.continue45:
16177 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
16178 // CHECK15:       omp.inner.for.inc46:
16179 // CHECK15-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
16180 // CHECK15-NEXT:    [[ADD47:%.*]] = add i64 [[TMP32]], 1
16181 // CHECK15-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
16182 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]]
16183 // CHECK15:       omp.inner.for.end48:
16184 // CHECK15-NEXT:    store i64 400, i64* [[IT]], align 8
16185 // CHECK15-NEXT:    [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4
16186 // CHECK15-NEXT:    store i32 [[TMP33]], i32* [[LIN]], align 4
16187 // CHECK15-NEXT:    [[TMP34:%.*]] = load i32, i32* [[A28]], align 4
16188 // CHECK15-NEXT:    store i32 [[TMP34]], i32* [[A]], align 4
16189 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
16190 // CHECK15-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
16191 // CHECK15-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
16192 // CHECK15-NEXT:    store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4
16193 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
16194 // CHECK15:       omp.inner.for.cond54:
16195 // CHECK15-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
16196 // CHECK15-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13
16197 // CHECK15-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]]
16198 // CHECK15-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
16199 // CHECK15:       omp.inner.for.body56:
16200 // CHECK15-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
16201 // CHECK15-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4
16202 // CHECK15-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
16203 // CHECK15-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
16204 // CHECK15-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13
16205 // CHECK15-NEXT:    [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
16206 // CHECK15-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1
16207 // CHECK15-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13
16208 // CHECK15-NEXT:    [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
16209 // CHECK15-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP40]] to i32
16210 // CHECK15-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
16211 // CHECK15-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
16212 // CHECK15-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13
16213 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
16214 // CHECK15:       omp.body.continue64:
16215 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
16216 // CHECK15:       omp.inner.for.inc65:
16217 // CHECK15-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
16218 // CHECK15-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1
16219 // CHECK15-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
16220 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]]
16221 // CHECK15:       omp.inner.for.end67:
16222 // CHECK15-NEXT:    store i16 22, i16* [[IT53]], align 2
16223 // CHECK15-NEXT:    [[TMP42:%.*]] = load i32, i32* [[A]], align 4
16224 // CHECK15-NEXT:    store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4
16225 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
16226 // CHECK15-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
16227 // CHECK15-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
16228 // CHECK15-NEXT:    store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4
16229 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
16230 // CHECK15:       omp.inner.for.cond73:
16231 // CHECK15-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
16232 // CHECK15-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16
16233 // CHECK15-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]]
16234 // CHECK15-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
16235 // CHECK15:       omp.inner.for.body75:
16236 // CHECK15-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
16237 // CHECK15-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1
16238 // CHECK15-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
16239 // CHECK15-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
16240 // CHECK15-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16
16241 // CHECK15-NEXT:    [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
16242 // CHECK15-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1
16243 // CHECK15-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16
16244 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
16245 // CHECK15-NEXT:    [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
16246 // CHECK15-NEXT:    [[CONV80:%.*]] = fpext float [[TMP48]] to double
16247 // CHECK15-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
16248 // CHECK15-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
16249 // CHECK15-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
16250 // CHECK15-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
16251 // CHECK15-NEXT:    [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
16252 // CHECK15-NEXT:    [[CONV84:%.*]] = fpext float [[TMP49]] to double
16253 // CHECK15-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
16254 // CHECK15-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
16255 // CHECK15-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
16256 // CHECK15-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
16257 // CHECK15-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2
16258 // CHECK15-NEXT:    [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
16259 // CHECK15-NEXT:    [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00
16260 // CHECK15-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
16261 // CHECK15-NEXT:    [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]]
16262 // CHECK15-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]]
16263 // CHECK15-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3
16264 // CHECK15-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
16265 // CHECK15-NEXT:    [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00
16266 // CHECK15-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
16267 // CHECK15-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
16268 // CHECK15-NEXT:    [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
16269 // CHECK15-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1
16270 // CHECK15-NEXT:    store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16
16271 // CHECK15-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
16272 // CHECK15-NEXT:    [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
16273 // CHECK15-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP54]] to i32
16274 // CHECK15-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
16275 // CHECK15-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
16276 // CHECK15-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16
16277 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
16278 // CHECK15:       omp.body.continue97:
16279 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
16280 // CHECK15:       omp.inner.for.inc98:
16281 // CHECK15-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
16282 // CHECK15-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1
16283 // CHECK15-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
16284 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]]
16285 // CHECK15:       omp.inner.for.end100:
16286 // CHECK15-NEXT:    store i8 96, i8* [[IT72]], align 1
16287 // CHECK15-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT:    [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK15-NEXT:    call void @llvm.stackrestore(i8* [[TMP57]])
// CHECK15-NEXT:    ret i32 [[TMP56]]
//
//
// CHECK15-LABEL: define {{[^@]+}}@_Z3bari
// CHECK15-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK15-NEXT:  entry:
// CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK15-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
// CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK15-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
// CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]])
// CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK15-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
// CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
// CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK15-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
// CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
// CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK15-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
// CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK15-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK15-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK15-NEXT:  entry:
// CHECK15-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT:    [[B:%.*]] = alloca i32, align 4
// CHECK15-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK15-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK15-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK15-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK15-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK15-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
// CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK15-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK15-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK15-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK15-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60
// CHECK15-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
// CHECK15-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK15-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK15-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK15-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK15-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK15-NEXT:    [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK15-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
// CHECK15-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK15:       omp_if.then:
// CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15:       omp.inner.for.cond:
// CHECK15-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK15-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19
// CHECK15-NEXT:    [[CMP2:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK15-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15:       omp.inner.for.body:
// CHECK15-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK15-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK15-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK15-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19
// CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
// CHECK15-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP10]] to double
// CHECK15-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK15-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK15-NEXT:    store double [[ADD3]], double* [[A]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK15-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK15-NEXT:    [[TMP11:%.*]] = load double, double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK15-NEXT:    [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK15-NEXT:    store double [[INC]], double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK15-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK15-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
// CHECK15-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK15-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !19
// CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15:       omp.body.continue:
// CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15:       omp.inner.for.inc:
// CHECK15-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK15-NEXT:    [[ADD7:%.*]] = add i64 [[TMP13]], 1
// CHECK15-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
// CHECK15:       omp.inner.for.end:
// CHECK15-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK15:       omp_if.else:
// CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND8:%.*]]
// CHECK15:       omp.inner.for.cond8:
// CHECK15-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK15-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK15-NEXT:    [[CMP9:%.*]] = icmp ule i64 [[TMP14]], [[TMP15]]
// CHECK15-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]]
// CHECK15:       omp.inner.for.body10:
// CHECK15-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK15-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP16]], 400
// CHECK15-NEXT:    [[SUB12:%.*]] = sub i64 2000, [[MUL11]]
// CHECK15-NEXT:    store i64 [[SUB12]], i64* [[IT]], align 8
// CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[B]], align 4
// CHECK15-NEXT:    [[CONV13:%.*]] = sitofp i32 [[TMP17]] to double
// CHECK15-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00
// CHECK15-NEXT:    [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK15-NEXT:    store double [[ADD14]], double* [[A15]], align 4
// CHECK15-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK15-NEXT:    [[TMP18:%.*]] = load double, double* [[A16]], align 4
// CHECK15-NEXT:    [[INC17:%.*]] = fadd double [[TMP18]], 1.000000e+00
// CHECK15-NEXT:    store double [[INC17]], double* [[A16]], align 4
// CHECK15-NEXT:    [[CONV18:%.*]] = fptosi double [[INC17]] to i16
// CHECK15-NEXT:    [[TMP19:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK15-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP19]]
// CHECK15-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i32 1
// CHECK15-NEXT:    store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2
// CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE21:%.*]]
// CHECK15:       omp.body.continue21:
// CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC22:%.*]]
// CHECK15:       omp.inner.for.inc22:
// CHECK15-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK15-NEXT:    [[ADD23:%.*]] = add i64 [[TMP20]], 1
// CHECK15-NEXT:    store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8
// CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK15:       omp.inner.for.end24:
// CHECK15-NEXT:    br label [[OMP_IF_END]]
// CHECK15:       omp_if.end:
// CHECK15-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK15-NEXT:    [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK15-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP21]]
// CHECK15-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1
// CHECK15-NEXT:    [[TMP22:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
// CHECK15-NEXT:    [[CONV27:%.*]] = sext i16 [[TMP22]] to i32
// CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[B]], align 4
// CHECK15-NEXT:    [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP23]]
// CHECK15-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK15-NEXT:    call void @llvm.stackrestore(i8* [[TMP24]])
// CHECK15-NEXT:    ret i32 [[ADD28]]
//
//
// CHECK15-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK15-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK15-NEXT:  entry:
// CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK15-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK15-NEXT:    [[AAA:%.*]] = alloca i8, align 1
// CHECK15-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK15-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK15-NEXT:    store i8 0, i8* [[AAA]], align 1
// CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT:    ret i32 [[TMP0]]
//
//
// CHECK15-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK15-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK15-NEXT:  entry:
// CHECK15-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK15-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK15-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK15-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK15-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK15-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK15-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK15-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK15-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK15-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK15-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15:       omp.inner.for.cond:
// CHECK15-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK15-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !25
// CHECK15-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK15-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15:       omp.inner.for.body:
// CHECK15-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK15-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK15-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !25
// CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25
// CHECK15-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK15-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !25
// CHECK15-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25
// CHECK15-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK15-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK15-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK15-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !25
// CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK15-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK15-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15:       omp.body.continue:
// CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15:       omp.inner.for.inc:
// CHECK15-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK15-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK15-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
// CHECK15:       omp.inner.for.end:
// CHECK15-NEXT:    store i64 11, i64* [[I]], align 8
// CHECK15-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK15-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK16-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK16-NEXT:  entry:
// CHECK16-NEXT:    ret i64 0
//
//
// CHECK16-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK16-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK16-NEXT:  entry:
// CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK16-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
// CHECK16-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK16-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK16-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[K:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    [[I7:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[K8:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    [[LIN:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[_TMP20:%.*]] = alloca i64, align 4
// CHECK16-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[A28:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
// CHECK16-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[IT53:%.*]] = alloca i16, align 2
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
// CHECK16-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[IT72:%.*]] = alloca i8, align 1
// CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK16-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK16-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK16-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
// CHECK16-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
// CHECK16-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
// CHECK16-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16:       omp.inner.for.cond:
// CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
// CHECK16-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16:       omp.inner.for.body:
// CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK16-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16:       omp.body.continue:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16:       omp.inner.for.inc:
// CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK16-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK16-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK16:       omp.inner.for.end:
// CHECK16-NEXT:    store i32 33, i32* [[I]], align 4
// CHECK16-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK16-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
// CHECK16-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
// CHECK16-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
// CHECK16-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4
// CHECK16-NEXT:    [[TMP10:%.*]] = load i64, i64* [[K]], align 8
// CHECK16-NEXT:    store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
// CHECK16:       omp.inner.for.cond9:
// CHECK16-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK16-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7
// CHECK16-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
// CHECK16-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
// CHECK16:       omp.inner.for.body11:
// CHECK16-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK16-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1
// CHECK16-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK16-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7
// CHECK16-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7
// CHECK16-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK16-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3
// CHECK16-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK16-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]]
// CHECK16-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7
// CHECK16-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7
// CHECK16-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK16-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK16:       omp.body.continue16:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK16:       omp.inner.for.inc17:
// CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK16-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK16-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK16:       omp.inner.for.end19:
// CHECK16-NEXT:    store i32 1, i32* [[I7]], align 4
// CHECK16-NEXT:    [[TMP18:%.*]] = load i64, i64* [[K8]], align 8
// CHECK16-NEXT:    store i64 [[TMP18]], i64* [[K]], align 8
// CHECK16-NEXT:    store i32 12, i32* [[LIN]], align 4
// CHECK16-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK16-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK16-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK16-NEXT:    store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8
// CHECK16-NEXT:    [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK16-NEXT:    store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4
// CHECK16-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT:    store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4
// CHECK16-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK16-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK16:       omp.inner.for.cond29:
// CHECK16-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK16-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10
// CHECK16-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
// CHECK16-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK16:       omp.inner.for.body31:
// CHECK16-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK16-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP24]], 400
// CHECK16-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK16-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10
// CHECK16-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10
// CHECK16-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP25]] to i64
// CHECK16-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK16-NEXT:    [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
// CHECK16-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]]
// CHECK16-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK16-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK16-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10
// CHECK16-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10
// CHECK16-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP28]] to i64
// CHECK16-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK16-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
// CHECK16-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]]
// CHECK16-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK16-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK16-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10
// CHECK16-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
// CHECK16-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP31]] to i32
// CHECK16-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK16-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK16-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK16:       omp.body.continue45:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK16:       omp.inner.for.inc46:
// CHECK16-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK16-NEXT:    [[ADD47:%.*]] = add i64 [[TMP32]], 1
// CHECK16-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK16:       omp.inner.for.end48:
// CHECK16-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK16-NEXT:    [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK16-NEXT:    store i32 [[TMP33]], i32* [[LIN]], align 4
// CHECK16-NEXT:    [[TMP34:%.*]] = load i32, i32* [[A28]], align 4
// CHECK16-NEXT:    store i32 [[TMP34]], i32* [[A]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK16-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK16-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK16-NEXT:    store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK16:       omp.inner.for.cond54:
// CHECK16-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]]
// CHECK16-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK16:       omp.inner.for.body56:
// CHECK16-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4
// CHECK16-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK16-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK16-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13
// CHECK16-NEXT:    [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1
// CHECK16-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
// CHECK16-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP40]] to i32
// CHECK16-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK16-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK16-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK16:       omp.body.continue64:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK16:       omp.inner.for.inc65:
// CHECK16-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK16-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK16:       omp.inner.for.end67:
// CHECK16-NEXT:    store i16 22, i16* [[IT53]], align 2
// CHECK16-NEXT:    [[TMP42:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT:    store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK16-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK16-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK16-NEXT:    store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK16:       omp.inner.for.cond73:
// CHECK16-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]]
// CHECK16-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK16:       omp.inner.for.body75:
// CHECK16-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1
// CHECK16-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK16-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK16-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16
// CHECK16-NEXT:    [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1
// CHECK16-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
// CHECK16-NEXT:    [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[CONV80:%.*]] = fpext float [[TMP48]] to double
// CHECK16-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK16-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK16-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
// CHECK16-NEXT:    [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[CONV84:%.*]] = fpext float [[TMP49]] to double
// CHECK16-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
// CHECK16-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
// CHECK16-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
// CHECK16-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2
// CHECK16-NEXT:    [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
// CHECK16-NEXT:    [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00
// CHECK16-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
// CHECK16-NEXT:    [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK16-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]]
// CHECK16-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3
// CHECK16-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
// CHECK16-NEXT:    [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK16-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
// CHECK16-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK16-NEXT:    [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1
// CHECK16-NEXT:    store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK16-NEXT:    [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP54]] to i32
// CHECK16-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK16-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK16-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK16:       omp.body.continue97:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK16:       omp.inner.for.inc98:
// CHECK16-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1
// CHECK16-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK16:       omp.inner.for.end100:
// CHECK16-NEXT:    store i8 96, i8* [[IT72]], align 1
// CHECK16-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT:    [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK16-NEXT:    call void @llvm.stackrestore(i8* [[TMP57]])
// CHECK16-NEXT:    ret i32 [[TMP56]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_Z3bari
// CHECK16-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK16-NEXT:  entry:
// CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
// CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK16-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
// CHECK16-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]])
// CHECK16-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK16-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
// CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
// CHECK16-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK16-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
// CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
// CHECK16-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK16-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
// CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK16-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK16-NEXT:  entry:
// CHECK16-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[B:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK16-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK16-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK16-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
// CHECK16-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK16-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK16-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK16-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK16-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60
// CHECK16-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
// CHECK16-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK16-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK16-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK16-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK16-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK16-NEXT:    [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK16-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
// CHECK16-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK16:       omp_if.then:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16:       omp.inner.for.cond:
// CHECK16-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK16-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19
// CHECK16-NEXT:    [[CMP2:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK16-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16:       omp.inner.for.body:
// CHECK16-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK16-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK16-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK16-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19
// CHECK16-NEXT:    [[TMP10:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
// CHECK16-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP10]] to double
// CHECK16-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK16-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK16-NEXT:    store double [[ADD3]], double* [[A]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK16-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK16-NEXT:    [[TMP11:%.*]] = load double, double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK16-NEXT:    [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK16-NEXT:    store double [[INC]], double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK16-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK16-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
// CHECK16-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK16-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !19
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16:       omp.body.continue:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16:       omp.inner.for.inc:
// CHECK16-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK16-NEXT:    [[ADD7:%.*]] = add i64 [[TMP13]], 1
// CHECK16-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
// CHECK16:       omp.inner.for.end:
// CHECK16-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK16:       omp_if.else:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND8:%.*]]
// CHECK16:       omp.inner.for.cond8:
// CHECK16-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK16-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK16-NEXT:    [[CMP9:%.*]] = icmp ule i64 [[TMP14]], [[TMP15]]
// CHECK16-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]]
// CHECK16:       omp.inner.for.body10:
// CHECK16-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK16-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP16]], 400
// CHECK16-NEXT:    [[SUB12:%.*]] = sub i64 2000, [[MUL11]]
// CHECK16-NEXT:    store i64 [[SUB12]], i64* [[IT]], align 8
// CHECK16-NEXT:    [[TMP17:%.*]] = load i32, i32* [[B]], align 4
// CHECK16-NEXT:    [[CONV13:%.*]] = sitofp i32 [[TMP17]] to double
// CHECK16-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00
// CHECK16-NEXT:    [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK16-NEXT:    store double [[ADD14]], double* [[A15]], align 4
// CHECK16-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK16-NEXT:    [[TMP18:%.*]] = load double, double* [[A16]], align 4
// CHECK16-NEXT:    [[INC17:%.*]] = fadd double [[TMP18]], 1.000000e+00
// CHECK16-NEXT:    store double [[INC17]], double* [[A16]], align 4
// CHECK16-NEXT:    [[CONV18:%.*]] = fptosi double [[INC17]] to i16
// CHECK16-NEXT:    [[TMP19:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK16-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP19]]
// CHECK16-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i32 1
// CHECK16-NEXT:    store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE21:%.*]]
// CHECK16:       omp.body.continue21:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC22:%.*]]
// CHECK16:       omp.inner.for.inc22:
// CHECK16-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK16-NEXT:    [[ADD23:%.*]] = add i64 [[TMP20]], 1
// CHECK16-NEXT:    store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK16:       omp.inner.for.end24:
// CHECK16-NEXT:    br label [[OMP_IF_END]]
// CHECK16:       omp_if.end:
// CHECK16-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK16-NEXT:    [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK16-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP21]]
// CHECK16-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1
// CHECK16-NEXT:    [[TMP22:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
// CHECK16-NEXT:    [[CONV27:%.*]] = sext i16 [[TMP22]] to i32
// CHECK16-NEXT:    [[TMP23:%.*]] = load i32, i32* [[B]], align 4
// CHECK16-NEXT:    [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP23]]
// CHECK16-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK16-NEXT:    call void @llvm.stackrestore(i8* [[TMP24]])
// CHECK16-NEXT:    ret i32 [[ADD28]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK16-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK16-NEXT:  entry:
// CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK16-NEXT:    [[AAA:%.*]] = alloca i8, align 1
// CHECK16-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK16-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK16-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK16-NEXT:    store i8 0, i8* [[AAA]], align 1
// CHECK16-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT:    ret i32 [[TMP0]]
//
//
// CHECK16-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK16-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK16-NEXT:  entry:
// CHECK16-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK16-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK16-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK16-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK16-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK16-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK16-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK16-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK16-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK16-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK16-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16:       omp.inner.for.cond:
// CHECK16-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK16-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !25
// CHECK16-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK16-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16:       omp.inner.for.body:
// CHECK16-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK16-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK16-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK16-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !25
// CHECK16-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK16-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25
// CHECK16-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK16-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK16-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK16-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !25
// CHECK16-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK16-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK16-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK16-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16:       omp.body.continue:
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16:       omp.inner.for.inc:
// CHECK16-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK16-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK16-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK16-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
// CHECK16:       omp.inner.for.end:
// CHECK16-NEXT:    store i64 11, i64* [[I]], align 8
// CHECK16-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK16-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK17-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK17-NEXT:  entry:
// CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK17-NEXT:    ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK17-NEXT:  entry:
// CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17:       cond.true:
// CHECK17-NEXT:    br label [[COND_END:%.*]]
// CHECK17:       cond.false:
// CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT:    br label [[COND_END]]
// CHECK17:       cond.end:
// CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17:       omp.inner.for.cond:
// CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK17-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK17-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17:       omp.inner.for.body:
// CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK17-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17:       omp.body.continue:
// CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17:       omp.inner.for.inc:
// CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK17-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK17-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK17:       omp.inner.for.end:
// CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17:       omp.loop.exit:
// CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK17-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17:       .omp.final.then:
// CHECK17-NEXT:    store i32 33, i32* [[I]], align 4
// CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK17:       .omp.final.done:
// CHECK17-NEXT:    ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK17-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT:  entry:
// CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK17-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK17-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT:    store i16 [[TMP0]], i16* [[CONV3]], align 2
// CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
// CHECK17-NEXT:    [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK17-NEXT:    store i32 [[TMP2]], i32* [[CONV4]], align 4
// CHECK17-NEXT:    [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 4
// CHECK17-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT:    store i32 [[TMP4]], i32* [[CONV5]], align 4
// CHECK17-NEXT:    [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
// CHECK17-NEXT:    ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK17-NEXT:  entry:
// CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK17-NEXT:    [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
// CHECK17-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK17-NEXT:    [[LIN4:%.*]] = alloca i32, align 4
// CHECK17-NEXT:    [[A5:%.*]] = alloca i32, align 4
// CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK17-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 4
// CHECK17-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
17183 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 4
17184 // CHECK17-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
17185 // CHECK17-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
17186 // CHECK17-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
17187 // CHECK17-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
17188 // CHECK17-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
17189 // CHECK17-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
17190 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17191 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17192 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
17193 // CHECK17-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
17194 // CHECK17-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
17195 // CHECK17-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
17196 // CHECK17-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
17197 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17198 // CHECK17:       cond.true:
17199 // CHECK17-NEXT:    br label [[COND_END:%.*]]
17200 // CHECK17:       cond.false:
17201 // CHECK17-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
17202 // CHECK17-NEXT:    br label [[COND_END]]
17203 // CHECK17:       cond.end:
17204 // CHECK17-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
17205 // CHECK17-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
17206 // CHECK17-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
17207 // CHECK17-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
17208 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17209 // CHECK17:       omp.inner.for.cond:
17210 // CHECK17-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
17211 // CHECK17-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !17
17212 // CHECK17-NEXT:    [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
17213 // CHECK17-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17214 // CHECK17:       omp.inner.for.body:
17215 // CHECK17-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
17216 // CHECK17-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
17217 // CHECK17-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
17218 // CHECK17-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !17
17219 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !17
17220 // CHECK17-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
17221 // CHECK17-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
17222 // CHECK17-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17
17223 // CHECK17-NEXT:    [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
17224 // CHECK17-NEXT:    [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
17225 // CHECK17-NEXT:    [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
17226 // CHECK17-NEXT:    store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !17
17227 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !17
17228 // CHECK17-NEXT:    [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
17229 // CHECK17-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
17230 // CHECK17-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17
17231 // CHECK17-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
17232 // CHECK17-NEXT:    [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
17233 // CHECK17-NEXT:    [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
17234 // CHECK17-NEXT:    store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !17
17235 // CHECK17-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !17
17236 // CHECK17-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
17237 // CHECK17-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
17238 // CHECK17-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
17239 // CHECK17-NEXT:    store i16 [[CONV16]], i16* [[CONV]], align 2, !llvm.access.group !17
17240 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17241 // CHECK17:       omp.body.continue:
17242 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17243 // CHECK17:       omp.inner.for.inc:
17244 // CHECK17-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
17245 // CHECK17-NEXT:    [[ADD17:%.*]] = add i64 [[TMP17]], 1
17246 // CHECK17-NEXT:    store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
17247 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
17248 // CHECK17:       omp.inner.for.end:
17249 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17250 // CHECK17:       omp.loop.exit:
17251 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
17252 // CHECK17-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17253 // CHECK17-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
17254 // CHECK17-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17255 // CHECK17:       .omp.final.then:
17256 // CHECK17-NEXT:    store i64 400, i64* [[IT]], align 8
17257 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17258 // CHECK17:       .omp.final.done:
17259 // CHECK17-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17260 // CHECK17-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
17261 // CHECK17-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
17262 // CHECK17:       .omp.linear.pu:
17263 // CHECK17-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4
17264 // CHECK17-NEXT:    store i32 [[TMP22]], i32* [[CONV1]], align 4
17265 // CHECK17-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A5]], align 4
17266 // CHECK17-NEXT:    store i32 [[TMP23]], i32* [[CONV2]], align 4
17267 // CHECK17-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
17268 // CHECK17:       .omp.linear.pu.done:
17269 // CHECK17-NEXT:    ret void
17270 //
17271 //
17272 // CHECK17-LABEL: define {{[^@]+}}@_Z7get_valv
17273 // CHECK17-SAME: () #[[ATTR3:[0-9]+]] {
17274 // CHECK17-NEXT:  entry:
17275 // CHECK17-NEXT:    ret i64 0
17276 //
17277 //
17278 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
17279 // CHECK17-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR0]] {
17280 // CHECK17-NEXT:  entry:
17281 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
17282 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
17283 // CHECK17-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
17284 // CHECK17-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
17285 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
17286 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
17287 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
17288 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
17289 // CHECK17-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
17290 // CHECK17-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
17291 // CHECK17-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
17292 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
17293 // CHECK17-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
17294 // CHECK17-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
17295 // CHECK17-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
17296 // CHECK17-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
17297 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
17298 // CHECK17-NEXT:    ret void
17299 //
17300 //
17301 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..2
17302 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] {
17303 // CHECK17-NEXT:  entry:
17304 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17305 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17306 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
17307 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
17308 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17309 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i16, align 2
17310 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17311 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17312 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17313 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17314 // CHECK17-NEXT:    [[IT:%.*]] = alloca i16, align 2
17315 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17316 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17317 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
17318 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
17319 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
17320 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
17321 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17322 // CHECK17-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
17323 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17324 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17325 // CHECK17-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17326 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
17327 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17328 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17329 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
17330 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17331 // CHECK17:       cond.true:
17332 // CHECK17-NEXT:    br label [[COND_END:%.*]]
17333 // CHECK17:       cond.false:
17334 // CHECK17-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17335 // CHECK17-NEXT:    br label [[COND_END]]
17336 // CHECK17:       cond.end:
17337 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
17338 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
17339 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17340 // CHECK17-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
17341 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17342 // CHECK17:       omp.inner.for.cond:
17343 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
17344 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20
17345 // CHECK17-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
17346 // CHECK17-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17347 // CHECK17:       omp.inner.for.body:
17348 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
17349 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
17350 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
17351 // CHECK17-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
17352 // CHECK17-NEXT:    store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !20
17353 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !20
17354 // CHECK17-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
17355 // CHECK17-NEXT:    store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group !20
17356 // CHECK17-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !20
17357 // CHECK17-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
17358 // CHECK17-NEXT:    [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
17359 // CHECK17-NEXT:    [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
17360 // CHECK17-NEXT:    store i16 [[CONV7]], i16* [[CONV1]], align 2, !llvm.access.group !20
17361 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17362 // CHECK17:       omp.body.continue:
17363 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17364 // CHECK17:       omp.inner.for.inc:
17365 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
17366 // CHECK17-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
17367 // CHECK17-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
17368 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
17369 // CHECK17:       omp.inner.for.end:
17370 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17371 // CHECK17:       omp.loop.exit:
17372 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
17373 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17374 // CHECK17-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
17375 // CHECK17-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17376 // CHECK17:       .omp.final.then:
17377 // CHECK17-NEXT:    store i16 22, i16* [[IT]], align 2
17378 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17379 // CHECK17:       .omp.final.done:
17380 // CHECK17-NEXT:    ret void
17381 //
17382 //
17383 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
17384 // CHECK17-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
17385 // CHECK17-NEXT:  entry:
17386 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
17387 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
17388 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
17389 // CHECK17-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
17390 // CHECK17-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
17391 // CHECK17-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
17392 // CHECK17-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
17393 // CHECK17-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
17394 // CHECK17-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
17395 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
17396 // CHECK17-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
17397 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
17398 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
17399 // CHECK17-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
17400 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
17401 // CHECK17-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
17402 // CHECK17-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
17403 // CHECK17-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
17404 // CHECK17-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
17405 // CHECK17-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
17406 // CHECK17-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
17407 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
17408 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
17409 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
17410 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
17411 // CHECK17-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
17412 // CHECK17-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
17413 // CHECK17-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
17414 // CHECK17-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
17415 // CHECK17-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
17416 // CHECK17-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
17417 // CHECK17-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
17418 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
17419 // CHECK17-NEXT:    [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
17420 // CHECK17-NEXT:    store i32 [[TMP8]], i32* [[CONV6]], align 4
17421 // CHECK17-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
17422 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 4
17423 // CHECK17-NEXT:    [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
17424 // CHECK17-NEXT:    store i32 [[TMP10]], i32* [[CONV7]], align 4
17425 // CHECK17-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
17426 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
17427 // CHECK17-NEXT:    ret void
17428 //
17429 //
17430 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..3
17431 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
17432 // CHECK17-NEXT:  entry:
17433 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17434 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17435 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
17436 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
17437 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
17438 // CHECK17-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
17439 // CHECK17-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
17440 // CHECK17-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
17441 // CHECK17-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
17442 // CHECK17-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
17443 // CHECK17-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
17444 // CHECK17-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
17445 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17446 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i8, align 1
17447 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17448 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17449 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17450 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17451 // CHECK17-NEXT:    [[IT:%.*]] = alloca i8, align 1
17452 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17453 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17454 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
17455 // CHECK17-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
17456 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
17457 // CHECK17-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
17458 // CHECK17-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
17459 // CHECK17-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
17460 // CHECK17-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
17461 // CHECK17-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
17462 // CHECK17-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
17463 // CHECK17-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
17464 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
17465 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
17466 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
17467 // CHECK17-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
17468 // CHECK17-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
17469 // CHECK17-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
17470 // CHECK17-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
17471 // CHECK17-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
17472 // CHECK17-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
17473 // CHECK17-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
17474 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17475 // CHECK17-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
17476 // CHECK17-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17477 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17478 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 4
17479 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17480 // CHECK17-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
17481 // CHECK17-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
17482 // CHECK17-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
17483 // CHECK17:       omp.dispatch.cond:
17484 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17485 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
17486 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17487 // CHECK17:       cond.true:
17488 // CHECK17-NEXT:    br label [[COND_END:%.*]]
17489 // CHECK17:       cond.false:
17490 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17491 // CHECK17-NEXT:    br label [[COND_END]]
17492 // CHECK17:       cond.end:
17493 // CHECK17-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
17494 // CHECK17-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
17495 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17496 // CHECK17-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
17497 // CHECK17-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
17498 // CHECK17-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17499 // CHECK17-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
17500 // CHECK17-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
17501 // CHECK17:       omp.dispatch.body:
17502 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17503 // CHECK17:       omp.inner.for.cond:
17504 // CHECK17-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
17505 // CHECK17-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23
17506 // CHECK17-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
17507 // CHECK17-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17508 // CHECK17:       omp.inner.for.body:
17509 // CHECK17-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
17510 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
17511 // CHECK17-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
17512 // CHECK17-NEXT:    [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
17513 // CHECK17-NEXT:    store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !23
17514 // CHECK17-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !23
17515 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
17516 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[CONV]], align 4, !llvm.access.group !23
17517 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
17518 // CHECK17-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !23
17519 // CHECK17-NEXT:    [[CONV9:%.*]] = fpext float [[TMP20]] to double
17520 // CHECK17-NEXT:    [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
17521 // CHECK17-NEXT:    [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
17522 // CHECK17-NEXT:    store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !23
17523 // CHECK17-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
17524 // CHECK17-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !23
17525 // CHECK17-NEXT:    [[CONV13:%.*]] = fpext float [[TMP21]] to double
17526 // CHECK17-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
17527 // CHECK17-NEXT:    [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
17528 // CHECK17-NEXT:    store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !23
17529 // CHECK17-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
17530 // CHECK17-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
17531 // CHECK17-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !23
17532 // CHECK17-NEXT:    [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
17533 // CHECK17-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !23
17534 // CHECK17-NEXT:    [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
17535 // CHECK17-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
17536 // CHECK17-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
17537 // CHECK17-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !23
17538 // CHECK17-NEXT:    [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
17539 // CHECK17-NEXT:    store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !23
17540 // CHECK17-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
17541 // CHECK17-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !23
17542 // CHECK17-NEXT:    [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
17543 // CHECK17-NEXT:    store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !23
17544 // CHECK17-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
17545 // CHECK17-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !23
17546 // CHECK17-NEXT:    [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
17547 // CHECK17-NEXT:    [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
17548 // CHECK17-NEXT:    [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
17549 // CHECK17-NEXT:    store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !23
17550 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17551 // CHECK17:       omp.body.continue:
17552 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17553 // CHECK17:       omp.inner.for.inc:
17554 // CHECK17-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
17555 // CHECK17-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
17556 // CHECK17-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
17557 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
17558 // CHECK17:       omp.inner.for.end:
17559 // CHECK17-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
17560 // CHECK17:       omp.dispatch.inc:
17561 // CHECK17-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17562 // CHECK17-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17563 // CHECK17-NEXT:    [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
17564 // CHECK17-NEXT:    store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
17565 // CHECK17-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17566 // CHECK17-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
17567 // CHECK17-NEXT:    [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
17568 // CHECK17-NEXT:    store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
17569 // CHECK17-NEXT:    br label [[OMP_DISPATCH_COND]]
17570 // CHECK17:       omp.dispatch.end:
17571 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
17572 // CHECK17-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17573 // CHECK17-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
17574 // CHECK17-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17575 // CHECK17:       .omp.final.then:
17576 // CHECK17-NEXT:    store i8 96, i8* [[IT]], align 1
17577 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17578 // CHECK17:       .omp.final.done:
17579 // CHECK17-NEXT:    ret void
17580 //
17581 //
17582 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
17583 // CHECK17-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
17584 // CHECK17-NEXT:  entry:
17585 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
17586 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
17587 // CHECK17-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
17588 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
17589 // CHECK17-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
17590 // CHECK17-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
17591 // CHECK17-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
17592 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
17593 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
17594 // CHECK17-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
17595 // CHECK17-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
17596 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
17597 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
17598 // CHECK17-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
17599 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
17600 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
17601 // CHECK17-NEXT:    [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
17602 // CHECK17-NEXT:    store i32 [[TMP1]], i32* [[CONV3]], align 4
17603 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
17604 // CHECK17-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
17605 // CHECK17-NEXT:    [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
17606 // CHECK17-NEXT:    store i16 [[TMP3]], i16* [[CONV4]], align 2
17607 // CHECK17-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
17608 // CHECK17-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 1
17609 // CHECK17-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
17610 // CHECK17-NEXT:    store i8 [[TMP5]], i8* [[CONV5]], align 1
17611 // CHECK17-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
17612 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
17613 // CHECK17-NEXT:    ret void
17614 //
17615 //
17616 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..4
17617 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
17618 // CHECK17-NEXT:  entry:
17619 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17620 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17621 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
17622 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
17623 // CHECK17-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
17624 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
17625 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17626 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17627 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17628 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17629 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
17630 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
17631 // CHECK17-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
17632 // CHECK17-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
17633 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
17634 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
17635 // CHECK17-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
17636 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
17637 // CHECK17-NEXT:    ret void
17638 //
17639 //
17640 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
17641 // CHECK17-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
17642 // CHECK17-NEXT:  entry:
17643 // CHECK17-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
17644 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
17645 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
17646 // CHECK17-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
17647 // CHECK17-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
17648 // CHECK17-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
17649 // CHECK17-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
17650 // CHECK17-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
17651 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
17652 // CHECK17-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
17653 // CHECK17-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
17654 // CHECK17-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
17655 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
17656 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
17657 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
17658 // CHECK17-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
17659 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
17660 // CHECK17-NEXT:    [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
17661 // CHECK17-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
17662 // CHECK17-NEXT:    [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
17663 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
17664 // CHECK17-NEXT:    ret void
17665 //
17666 //
17667 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..5
17668 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] {
17669 // CHECK17-NEXT:  entry:
17670 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17671 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17672 // CHECK17-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
17673 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
17674 // CHECK17-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
17675 // CHECK17-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
17676 // CHECK17-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
17677 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
17678 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i64, align 8
17679 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
17680 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
17681 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
17682 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17683 // CHECK17-NEXT:    [[IT:%.*]] = alloca i64, align 8
17684 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17685 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17686 // CHECK17-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
17687 // CHECK17-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
17688 // CHECK17-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
17689 // CHECK17-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
17690 // CHECK17-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
17691 // CHECK17-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
17692 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
17693 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
17694 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
17695 // CHECK17-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
17696 // CHECK17-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
17697 // CHECK17-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
17698 // CHECK17-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
17699 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17700 // CHECK17-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17701 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
17702 // CHECK17-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
17703 // CHECK17-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
17704 // CHECK17-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
17705 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17706 // CHECK17:       cond.true:
17707 // CHECK17-NEXT:    br label [[COND_END:%.*]]
17708 // CHECK17:       cond.false:
17709 // CHECK17-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
17710 // CHECK17-NEXT:    br label [[COND_END]]
17711 // CHECK17:       cond.end:
17712 // CHECK17-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
17713 // CHECK17-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
17714 // CHECK17-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
17715 // CHECK17-NEXT:    store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
17716 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17717 // CHECK17:       omp.inner.for.cond:
17718 // CHECK17-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
17719 // CHECK17-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !26
17720 // CHECK17-NEXT:    [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
17721 // CHECK17-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17722 // CHECK17:       omp.inner.for.body:
17723 // CHECK17-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
17724 // CHECK17-NEXT:    [[MUL:%.*]] = mul i64 [[TMP11]], 400
17725 // CHECK17-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
17726 // CHECK17-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !26
17727 // CHECK17-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !26
17728 // CHECK17-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double
17729 // CHECK17-NEXT:    [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
17730 // CHECK17-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
17731 // CHECK17-NEXT:    store double [[ADD]], double* [[A]], align 8, !llvm.access.group !26
17732 // CHECK17-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
17733 // CHECK17-NEXT:    [[TMP13:%.*]] = load double, double* [[A5]], align 8, !llvm.access.group !26
17734 // CHECK17-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
17735 // CHECK17-NEXT:    store double [[INC]], double* [[A5]], align 8, !llvm.access.group !26
17736 // CHECK17-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
17737 // CHECK17-NEXT:    [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]]
17738 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]]
17739 // CHECK17-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
17740 // CHECK17-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !26
17741 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17742 // CHECK17:       omp.body.continue:
17743 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17744 // CHECK17:       omp.inner.for.inc:
17745 // CHECK17-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
17746 // CHECK17-NEXT:    [[ADD8:%.*]] = add i64 [[TMP15]], 1
17747 // CHECK17-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
17748 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
17749 // CHECK17:       omp.inner.for.end:
17750 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17751 // CHECK17:       omp.loop.exit:
17752 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
17753 // CHECK17-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17754 // CHECK17-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
17755 // CHECK17-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17756 // CHECK17:       .omp.final.then:
17757 // CHECK17-NEXT:    store i64 400, i64* [[IT]], align 8
17758 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17759 // CHECK17:       .omp.final.done:
17760 // CHECK17-NEXT:    ret void
17761 //
17762 //
17763 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
17764 // CHECK17-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
17765 // CHECK17-NEXT:  entry:
17766 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
17767 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
17768 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
17769 // CHECK17-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
17770 // CHECK17-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
17771 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
17772 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
17773 // CHECK17-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
17774 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
17775 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
17776 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
17777 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
17778 // CHECK17-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
17779 // CHECK17-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
17780 // CHECK17-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
17781 // CHECK17-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
17782 // CHECK17-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
17783 // CHECK17-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
17784 // CHECK17-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
17785 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
17786 // CHECK17-NEXT:    ret void
17787 //
17788 //
17789 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..6
17790 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
17791 // CHECK17-NEXT:  entry:
17792 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17793 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17794 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
17795 // CHECK17-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
17796 // CHECK17-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
17797 // CHECK17-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
17798 // CHECK17-NEXT:    [[TMP:%.*]] = alloca i64, align 8
17799 // CHECK17-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
17800 // CHECK17-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
17801 // CHECK17-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
17802 // CHECK17-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17803 // CHECK17-NEXT:    [[I:%.*]] = alloca i64, align 8
17804 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17805 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17806 // CHECK17-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
17807 // CHECK17-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
17808 // CHECK17-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
17809 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
17810 // CHECK17-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
17811 // CHECK17-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
17812 // CHECK17-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
17813 // CHECK17-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
17814 // CHECK17-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
17815 // CHECK17-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17816 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17817 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
17818 // CHECK17-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
17819 // CHECK17-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
17820 // CHECK17-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
17821 // CHECK17-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17822 // CHECK17:       cond.true:
17823 // CHECK17-NEXT:    br label [[COND_END:%.*]]
17824 // CHECK17:       cond.false:
17825 // CHECK17-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
17826 // CHECK17-NEXT:    br label [[COND_END]]
17827 // CHECK17:       cond.end:
17828 // CHECK17-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
17829 // CHECK17-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
17830 // CHECK17-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
17831 // CHECK17-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
17832 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17833 // CHECK17:       omp.inner.for.cond:
17834 // CHECK17-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
17835 // CHECK17-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !29
17836 // CHECK17-NEXT:    [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
17837 // CHECK17-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17838 // CHECK17:       omp.inner.for.body:
17839 // CHECK17-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
17840 // CHECK17-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
17841 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
17842 // CHECK17-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !29
17843 // CHECK17-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !29
17844 // CHECK17-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
17845 // CHECK17-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !29
17846 // CHECK17-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !29
17847 // CHECK17-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
17848 // CHECK17-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
17849 // CHECK17-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
17850 // CHECK17-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !29
17851 // CHECK17-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
17852 // CHECK17-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !29
17853 // CHECK17-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
17854 // CHECK17-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !29
17855 // CHECK17-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17856 // CHECK17:       omp.body.continue:
17857 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17858 // CHECK17:       omp.inner.for.inc:
17859 // CHECK17-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
17860 // CHECK17-NEXT:    [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
17861 // CHECK17-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
17862 // CHECK17-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
17863 // CHECK17:       omp.inner.for.end:
17864 // CHECK17-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17865 // CHECK17:       omp.loop.exit:
17866 // CHECK17-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
17867 // CHECK17-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17868 // CHECK17-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
17869 // CHECK17-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17870 // CHECK17:       .omp.final.then:
17871 // CHECK17-NEXT:    store i64 11, i64* [[I]], align 8
17872 // CHECK17-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17873 // CHECK17:       .omp.final.done:
17874 // CHECK17-NEXT:    ret void
17875 //
17876 //
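// --- Region at _Z3fooi line 96 (offload entry) ---
// The next block verifies the offload entry emitted for the target region at
// source line 96: it captures nothing and simply forks the outlined parallel
// body through __kmpc_fork_call with zero forwarded arguments.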
17877 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
17878 // CHECK18-SAME: () #[[ATTR0:[0-9]+]] {
17879 // CHECK18-NEXT:  entry:
17880 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
17881 // CHECK18-NEXT:    ret void
17882 //
17883 //
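// The outlined parallel body for line 96. The static schedule is set up with
// __kmpc_for_static_init_4 over iterations 0..5 (six iterations), and the IR
// is consistent with a canonical loop of roughly this shape (a hedged
// reconstruction; the variable name is illustrative, not taken from the
// test source):
//
//   #pragma omp parallel for simd
//   for (int i = 3; i < 32; i += 5) ;
//
// The .omp.final.then block stores i = 33, the value i holds after the loop.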
17884 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined.
17885 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
17886 // CHECK18-NEXT:  entry:
17887 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17888 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17889 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
17890 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
17891 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
17892 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
17893 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
17894 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17895 // CHECK18-NEXT:    [[I:%.*]] = alloca i32, align 4
17896 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
17897 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
17898 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
17899 // CHECK18-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
17900 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
17901 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
17902 // CHECK18-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
17903 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
17904 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
17905 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17906 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
17907 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
17908 // CHECK18:       cond.true:
17909 // CHECK18-NEXT:    br label [[COND_END:%.*]]
17910 // CHECK18:       cond.false:
17911 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
17912 // CHECK18-NEXT:    br label [[COND_END]]
17913 // CHECK18:       cond.end:
17914 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
17915 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
17916 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
17917 // CHECK18-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
17918 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
17919 // CHECK18:       omp.inner.for.cond:
17920 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
17921 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
17922 // CHECK18-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
17923 // CHECK18-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17924 // CHECK18:       omp.inner.for.body:
17925 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
17926 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
17927 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
17928 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
17929 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
17930 // CHECK18:       omp.body.continue:
17931 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
17932 // CHECK18:       omp.inner.for.inc:
17933 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
17934 // CHECK18-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
17935 // CHECK18-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
17936 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
17937 // CHECK18:       omp.inner.for.end:
17938 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
17939 // CHECK18:       omp.loop.exit:
17940 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
17941 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
17942 // CHECK18-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
17943 // CHECK18-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
17944 // CHECK18:       .omp.final.then:
17945 // CHECK18-NEXT:    store i32 33, i32* [[I]], align 4
17946 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
17947 // CHECK18:       .omp.final.done:
17948 // CHECK18-NEXT:    ret void
17949 //
17950 //
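// --- Region at _Z3fooi line 108 ---
// Entry point for the target region at line 108: the three scalar captures
// (aa: i16, lin and a: i32) are each packed into an i64 through a bitcast of
// the corresponding *_CASTED slot before being forwarded to the
// three-argument __kmpc_fork_call.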
17951 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
17952 // CHECK18-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR0]] {
17953 // CHECK18-NEXT:  entry:
17954 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
17955 // CHECK18-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
17956 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
17957 // CHECK18-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
17958 // CHECK18-NEXT:    [[LIN_CASTED:%.*]] = alloca i64, align 8
17959 // CHECK18-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
17960 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
17961 // CHECK18-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
17962 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
17963 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
17964 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
17965 // CHECK18-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
17966 // CHECK18-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
17967 // CHECK18-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
17968 // CHECK18-NEXT:    store i16 [[TMP0]], i16* [[CONV3]], align 2
17969 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
17970 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
17971 // CHECK18-NEXT:    [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
17972 // CHECK18-NEXT:    store i32 [[TMP2]], i32* [[CONV4]], align 4
17973 // CHECK18-NEXT:    [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
17974 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 4
17975 // CHECK18-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
17976 // CHECK18-NEXT:    store i32 [[TMP4]], i32* [[CONV5]], align 4
17977 // CHECK18-NEXT:    [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
17978 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
17979 // CHECK18-NEXT:    ret void
17980 //
17981 //
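// The outlined body for line 108 exercises the linear() clause: lin and a are
// snapshotted into .linear.start slots, the step is fetched from @_Z7get_valv,
// and a __kmpc_barrier precedes the unsigned 64-bit worksharing init
// (__kmpc_for_static_init_8u, four iterations). The IR matches a loop of
// roughly this shape (a hedged reconstruction; exact source syntax may
// differ):
//
//   #pragma omp parallel for simd linear(lin, a : get_val())
//   for (unsigned long long it = 2000; it > 400; it -= 400) aa += 1;
//
// After the loop, .omp.final.then stores the final it = 400, and
// .omp.linear.pu copies the privatized lin/a values back out when this thread
// ran the last iteration.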
17982 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..1
17983 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
17984 // CHECK18-NEXT:  entry:
17985 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
17986 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
17987 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
17988 // CHECK18-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
17989 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
17990 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
17991 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i64, align 8
17992 // CHECK18-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
17993 // CHECK18-NEXT:    [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
17994 // CHECK18-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
17995 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
17996 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
17997 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
17998 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
17999 // CHECK18-NEXT:    [[IT:%.*]] = alloca i64, align 8
18000 // CHECK18-NEXT:    [[LIN4:%.*]] = alloca i32, align 4
18001 // CHECK18-NEXT:    [[A5:%.*]] = alloca i32, align 4
18002 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
18003 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
18004 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
18005 // CHECK18-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
18006 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
18007 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
18008 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
18009 // CHECK18-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
18010 // CHECK18-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 4
18011 // CHECK18-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
18012 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 4
18013 // CHECK18-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
18014 // CHECK18-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
18015 // CHECK18-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
18016 // CHECK18-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
18017 // CHECK18-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
18018 // CHECK18-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
18019 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18020 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18021 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
18022 // CHECK18-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
18023 // CHECK18-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
18024 // CHECK18-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
18025 // CHECK18-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
18026 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18027 // CHECK18:       cond.true:
18028 // CHECK18-NEXT:    br label [[COND_END:%.*]]
18029 // CHECK18:       cond.false:
18030 // CHECK18-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
18031 // CHECK18-NEXT:    br label [[COND_END]]
18032 // CHECK18:       cond.end:
18033 // CHECK18-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
18034 // CHECK18-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
18035 // CHECK18-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
18036 // CHECK18-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
18037 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18038 // CHECK18:       omp.inner.for.cond:
18039 // CHECK18-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
18040 // CHECK18-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !17
18041 // CHECK18-NEXT:    [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
18042 // CHECK18-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18043 // CHECK18:       omp.inner.for.body:
18044 // CHECK18-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
18045 // CHECK18-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
18046 // CHECK18-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
18047 // CHECK18-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !17
18048 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !17
18049 // CHECK18-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
18050 // CHECK18-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
18051 // CHECK18-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17
18052 // CHECK18-NEXT:    [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
18053 // CHECK18-NEXT:    [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
18054 // CHECK18-NEXT:    [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
18055 // CHECK18-NEXT:    store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !17
18056 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !17
18057 // CHECK18-NEXT:    [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
18058 // CHECK18-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
18059 // CHECK18-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17
18060 // CHECK18-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
18061 // CHECK18-NEXT:    [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
18062 // CHECK18-NEXT:    [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
18063 // CHECK18-NEXT:    store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !17
18064 // CHECK18-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !17
18065 // CHECK18-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
18066 // CHECK18-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
18067 // CHECK18-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
18068 // CHECK18-NEXT:    store i16 [[CONV16]], i16* [[CONV]], align 2, !llvm.access.group !17
18069 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18070 // CHECK18:       omp.body.continue:
18071 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18072 // CHECK18:       omp.inner.for.inc:
18073 // CHECK18-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
18074 // CHECK18-NEXT:    [[ADD17:%.*]] = add i64 [[TMP17]], 1
18075 // CHECK18-NEXT:    store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
18076 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
18077 // CHECK18:       omp.inner.for.end:
18078 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18079 // CHECK18:       omp.loop.exit:
18080 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
18081 // CHECK18-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18082 // CHECK18-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
18083 // CHECK18-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18084 // CHECK18:       .omp.final.then:
18085 // CHECK18-NEXT:    store i64 400, i64* [[IT]], align 8
18086 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18087 // CHECK18:       .omp.final.done:
18088 // CHECK18-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18089 // CHECK18-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
18090 // CHECK18-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
18091 // CHECK18:       .omp.linear.pu:
18092 // CHECK18-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4
18093 // CHECK18-NEXT:    store i32 [[TMP22]], i32* [[CONV1]], align 4
18094 // CHECK18-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A5]], align 4
18095 // CHECK18-NEXT:    store i32 [[TMP23]], i32* [[CONV2]], align 4
18096 // CHECK18-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
18097 // CHECK18:       .omp.linear.pu.done:
18098 // CHECK18-NEXT:    ret void
18099 //
18100 //
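// The step function referenced by the linear-clause checks above; this
// version simply returns 0.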
18101 // CHECK18-LABEL: define {{[^@]+}}@_Z7get_valv
18102 // CHECK18-SAME: () #[[ATTR3:[0-9]+]] {
18103 // CHECK18-NEXT:  entry:
18104 // CHECK18-NEXT:    ret i64 0
18105 //
18106 //
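// --- Region at _Z3fooi line 116 ---
// Entry point for the target region at line 116, forwarding a (i32) and
// aa (i16) through i64 casting slots into a two-argument __kmpc_fork_call.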
18107 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
18108 // CHECK18-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR0]] {
18109 // CHECK18-NEXT:  entry:
18110 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
18111 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
18112 // CHECK18-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
18113 // CHECK18-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
18114 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
18115 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
18116 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
18117 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
18118 // CHECK18-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
18119 // CHECK18-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
18120 // CHECK18-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
18121 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
18122 // CHECK18-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
18123 // CHECK18-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
18124 // CHECK18-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
18125 // CHECK18-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
18126 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
18127 // CHECK18-NEXT:    ret void
18128 //
18129 //
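// The outlined body for line 116 iterates over a 16-bit induction variable.
// With iterations 0..3 and it computed as 6 + 4*iv, the IR is consistent with
// roughly (a hedged reconstruction; names are illustrative):
//
//   #pragma omp parallel for simd
//   for (short it = 6; it < 20; it += 4) { a += 1; aa += 1; }
//
// .omp.final.then stores it = 22, the post-loop value.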
18130 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..2
18131 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] {
18132 // CHECK18-NEXT:  entry:
18133 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
18134 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
18135 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
18136 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
18137 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18138 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i16, align 2
18139 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18140 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18141 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18142 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18143 // CHECK18-NEXT:    [[IT:%.*]] = alloca i16, align 2
18144 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
18145 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
18146 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
18147 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
18148 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
18149 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
18150 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18151 // CHECK18-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
18152 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18153 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18154 // CHECK18-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18155 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
18156 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18157 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18158 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
18159 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18160 // CHECK18:       cond.true:
18161 // CHECK18-NEXT:    br label [[COND_END:%.*]]
18162 // CHECK18:       cond.false:
18163 // CHECK18-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18164 // CHECK18-NEXT:    br label [[COND_END]]
18165 // CHECK18:       cond.end:
18166 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
18167 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
18168 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18169 // CHECK18-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
18170 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18171 // CHECK18:       omp.inner.for.cond:
18172 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
18173 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20
18174 // CHECK18-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
18175 // CHECK18-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18176 // CHECK18:       omp.inner.for.body:
18177 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
18178 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
18179 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
18180 // CHECK18-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
18181 // CHECK18-NEXT:    store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !20
18182 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !20
18183 // CHECK18-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
18184 // CHECK18-NEXT:    store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group !20
18185 // CHECK18-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !20
18186 // CHECK18-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
18187 // CHECK18-NEXT:    [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
18188 // CHECK18-NEXT:    [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
18189 // CHECK18-NEXT:    store i16 [[CONV7]], i16* [[CONV1]], align 2, !llvm.access.group !20
18190 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18191 // CHECK18:       omp.body.continue:
18192 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18193 // CHECK18:       omp.inner.for.inc:
18194 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
18195 // CHECK18-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
18196 // CHECK18-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
18197 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
18198 // CHECK18:       omp.inner.for.end:
18199 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18200 // CHECK18:       omp.loop.exit:
18201 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
18202 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18203 // CHECK18-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
18204 // CHECK18-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18205 // CHECK18:       .omp.final.then:
18206 // CHECK18-NEXT:    store i16 22, i16* [[IT]], align 2
18207 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18208 // CHECK18:       .omp.final.done:
18209 // CHECK18-NEXT:    ret void
18210 //
18211 //
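// --- Region at _Z3fooi line 140 ---
// Entry point for the target region at line 140: ten captures (a, the fixed
// arrays b and c, the VLA pointers bn and cn with their three i64 extents,
// the struct TT, and a captured chunk-size expression) are reloaded from
// their *_ADDR slots and forwarded to __kmpc_fork_call.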
18212 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
18213 // CHECK18-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
18214 // CHECK18-NEXT:  entry:
18215 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
18216 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
18217 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
18218 // CHECK18-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
18219 // CHECK18-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
18220 // CHECK18-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
18221 // CHECK18-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
18222 // CHECK18-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
18223 // CHECK18-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
18224 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
18225 // CHECK18-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
18226 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
18227 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
18228 // CHECK18-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
18229 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
18230 // CHECK18-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
18231 // CHECK18-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
18232 // CHECK18-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
18233 // CHECK18-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
18234 // CHECK18-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
18235 // CHECK18-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
18236 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
18237 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
18238 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
18239 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
18240 // CHECK18-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
18241 // CHECK18-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
18242 // CHECK18-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
18243 // CHECK18-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
18244 // CHECK18-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
18245 // CHECK18-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
18246 // CHECK18-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
18247 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
18248 // CHECK18-NEXT:    [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
18249 // CHECK18-NEXT:    store i32 [[TMP8]], i32* [[CONV6]], align 4
18250 // CHECK18-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
18251 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 4
18252 // CHECK18-NEXT:    [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
18253 // CHECK18-NEXT:    store i32 [[TMP10]], i32* [[CONV7]], align 4
18254 // CHECK18-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
18255 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
18256 // CHECK18-NEXT:    ret void
18257 //
18258 //
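// The outlined body for line 140 verifies a chunked static schedule: the
// captured expression becomes the chunk argument of __kmpc_for_static_init_4
// (schedule type 33), and an outer omp.dispatch loop re-checks the bounds and
// bumps .omp.lb/.omp.ub by the stride after each chunk. The inner loop is
// consistent with roughly (a hedged reconstruction; ch stands for the
// captured chunk expression):
//
//   #pragma omp parallel for simd schedule(static, ch)
//   for (unsigned char it = 122; it > 96; --it)
//     /* updates a, b[2], bn[3], c[1][2], cn[1][3], d.X, d.Y */;
//
// .omp.final.then stores it = 96 after the 26 iterations (iv 0..25).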
18259 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..3
18260 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
18261 // CHECK18-NEXT:  entry:
18262 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
18263 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
18264 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
18265 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
18266 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
18267 // CHECK18-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
18268 // CHECK18-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
18269 // CHECK18-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
18270 // CHECK18-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
18271 // CHECK18-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
18272 // CHECK18-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
18273 // CHECK18-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
18274 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18275 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i8, align 1
18276 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18277 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18278 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18279 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18280 // CHECK18-NEXT:    [[IT:%.*]] = alloca i8, align 1
18281 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
18282 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
18283 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
18284 // CHECK18-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
18285 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
18286 // CHECK18-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
18287 // CHECK18-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
18288 // CHECK18-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
18289 // CHECK18-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
18290 // CHECK18-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
18291 // CHECK18-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
18292 // CHECK18-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
18293 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
18294 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
18295 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
18296 // CHECK18-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
18297 // CHECK18-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
18298 // CHECK18-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
18299 // CHECK18-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
18300 // CHECK18-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
18301 // CHECK18-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
18302 // CHECK18-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
18303 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18304 // CHECK18-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
18305 // CHECK18-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18306 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18307 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 4
18308 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18309 // CHECK18-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
18310 // CHECK18-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
18311 // CHECK18-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
18312 // CHECK18:       omp.dispatch.cond:
18313 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18314 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
18315 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18316 // CHECK18:       cond.true:
18317 // CHECK18-NEXT:    br label [[COND_END:%.*]]
18318 // CHECK18:       cond.false:
18319 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18320 // CHECK18-NEXT:    br label [[COND_END]]
18321 // CHECK18:       cond.end:
18322 // CHECK18-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
18323 // CHECK18-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
18324 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18325 // CHECK18-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
18326 // CHECK18-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
18327 // CHECK18-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18328 // CHECK18-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
18329 // CHECK18-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
18330 // CHECK18:       omp.dispatch.body:
18331 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18332 // CHECK18:       omp.inner.for.cond:
18333 // CHECK18-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
18334 // CHECK18-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23
18335 // CHECK18-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
18336 // CHECK18-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18337 // CHECK18:       omp.inner.for.body:
18338 // CHECK18-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
18339 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
18340 // CHECK18-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
18341 // CHECK18-NEXT:    [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
18342 // CHECK18-NEXT:    store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !23
18343 // CHECK18-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !23
18344 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
18345 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[CONV]], align 4, !llvm.access.group !23
18346 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
18347 // CHECK18-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !23
18348 // CHECK18-NEXT:    [[CONV9:%.*]] = fpext float [[TMP20]] to double
18349 // CHECK18-NEXT:    [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
18350 // CHECK18-NEXT:    [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
18351 // CHECK18-NEXT:    store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !23
18352 // CHECK18-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
18353 // CHECK18-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !23
18354 // CHECK18-NEXT:    [[CONV13:%.*]] = fpext float [[TMP21]] to double
18355 // CHECK18-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
18356 // CHECK18-NEXT:    [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
18357 // CHECK18-NEXT:    store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !23
18358 // CHECK18-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
18359 // CHECK18-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
18360 // CHECK18-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !23
18361 // CHECK18-NEXT:    [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
18362 // CHECK18-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !23
18363 // CHECK18-NEXT:    [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
18364 // CHECK18-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
18365 // CHECK18-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
18366 // CHECK18-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !23
18367 // CHECK18-NEXT:    [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
18368 // CHECK18-NEXT:    store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !23
18369 // CHECK18-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
18370 // CHECK18-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !23
18371 // CHECK18-NEXT:    [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
18372 // CHECK18-NEXT:    store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !23
18373 // CHECK18-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
18374 // CHECK18-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !23
18375 // CHECK18-NEXT:    [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
18376 // CHECK18-NEXT:    [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
18377 // CHECK18-NEXT:    [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
18378 // CHECK18-NEXT:    store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !23
18379 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18380 // CHECK18:       omp.body.continue:
18381 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18382 // CHECK18:       omp.inner.for.inc:
18383 // CHECK18-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
18384 // CHECK18-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
18385 // CHECK18-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
18386 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
18387 // CHECK18:       omp.inner.for.end:
18388 // CHECK18-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
18389 // CHECK18:       omp.dispatch.inc:
18390 // CHECK18-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18391 // CHECK18-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
18392 // CHECK18-NEXT:    [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
18393 // CHECK18-NEXT:    store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
18394 // CHECK18-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18395 // CHECK18-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
18396 // CHECK18-NEXT:    [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
18397 // CHECK18-NEXT:    store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
18398 // CHECK18-NEXT:    br label [[OMP_DISPATCH_COND]]
18399 // CHECK18:       omp.dispatch.end:
18400 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
18401 // CHECK18-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18402 // CHECK18-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
18403 // CHECK18-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18404 // CHECK18:       .omp.final.then:
18405 // CHECK18-NEXT:    store i8 96, i8* [[IT]], align 1
18406 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18407 // CHECK18:       .omp.final.done:
18408 // CHECK18-NEXT:    ret void
18409 //
18410 //
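// --- Region at _ZL7fstatici line 195 ---
// Entry point for the target region inside the internal function fstatic:
// a (i32), aa (i16) and aaa (i8) travel through i64 casting slots, followed
// by the array b, into a four-argument __kmpc_fork_call.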
18411 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
18412 // CHECK18-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
18413 // CHECK18-NEXT:  entry:
18414 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
18415 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
18416 // CHECK18-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
18417 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
18418 // CHECK18-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
18419 // CHECK18-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
18420 // CHECK18-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
18421 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
18422 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
18423 // CHECK18-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
18424 // CHECK18-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
18425 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
18426 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
18427 // CHECK18-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
18428 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
18429 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
18430 // CHECK18-NEXT:    [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
18431 // CHECK18-NEXT:    store i32 [[TMP1]], i32* [[CONV3]], align 4
18432 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
18433 // CHECK18-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
18434 // CHECK18-NEXT:    [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
18435 // CHECK18-NEXT:    store i16 [[TMP3]], i16* [[CONV4]], align 2
18436 // CHECK18-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
18437 // CHECK18-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 1
18438 // CHECK18-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
18439 // CHECK18-NEXT:    store i8 [[TMP5]], i8* [[CONV5]], align 1
18440 // CHECK18-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
18441 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
18442 // CHECK18-NEXT:    ret void
18443 //
18444 //
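// The outlined body for the fstatic region only gets prologue checks: after
// the allocas and the A/AA/AAA address bitcasts it returns immediately,
// presumably because this region's loop has no statically reachable
// iterations.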
18445 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..4
18446 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
18447 // CHECK18-NEXT:  entry:
18448 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
18449 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
18450 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
18451 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
18452 // CHECK18-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
18453 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
18454 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18455 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18456 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
18457 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
18458 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
18459 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
18460 // CHECK18-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
18461 // CHECK18-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
18462 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
18463 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
18464 // CHECK18-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
18465 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
18466 // CHECK18-NEXT:    ret void
18467 //
18468 //
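// --- Region at _ZN2S12r1Ei line 216 ---
// Entry point for the member-function region: `this`, the scalar b, two VLA
// extents and the i16 VLA pointer c are forwarded to a five-argument
// __kmpc_fork_call.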
18469 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
18470 // CHECK18-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
18471 // CHECK18-NEXT:  entry:
18472 // CHECK18-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
18473 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
18474 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
18475 // CHECK18-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
18476 // CHECK18-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
18477 // CHECK18-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
18478 // CHECK18-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
18479 // CHECK18-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
18480 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
18481 // CHECK18-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
18482 // CHECK18-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
18483 // CHECK18-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
18484 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
18485 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
18486 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
18487 // CHECK18-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
18488 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
18489 // CHECK18-NEXT:    [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
18490 // CHECK18-NEXT:    store i32 [[TMP4]], i32* [[CONV3]], align 4
18491 // CHECK18-NEXT:    [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
18492 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
18493 // CHECK18-NEXT:    ret void
18494 //
18495 //
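// The outlined body for S1::r1 mirrors the unsigned 64-bit schedule of the
// line-108 region (__kmpc_for_static_init_8u, four iterations,
// it = 2000 - 400*iv). Its body sets this->a from b, pre-increments it, and
// stores the truncated result into the 2-D VLA c at [1][1]; roughly (a hedged
// reconstruction):
//
//   #pragma omp parallel for simd
//   for (unsigned long long it = 2000; it > 400; it -= 400) {
//     this->a = b + 1.5;
//     c[1][1] = ++this->a;
//   }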
18496 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..5
18497 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] {
18498 // CHECK18-NEXT:  entry:
18499 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
18500 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
18501 // CHECK18-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
18502 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
18503 // CHECK18-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
18504 // CHECK18-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
18505 // CHECK18-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
18506 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
18507 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i64, align 8
18508 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
18509 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
18510 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
18511 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18512 // CHECK18-NEXT:    [[IT:%.*]] = alloca i64, align 8
18513 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
18514 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
18515 // CHECK18-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
18516 // CHECK18-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
18517 // CHECK18-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
18518 // CHECK18-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
18519 // CHECK18-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
18520 // CHECK18-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
18521 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
18522 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
18523 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
18524 // CHECK18-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
18525 // CHECK18-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
18526 // CHECK18-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
18527 // CHECK18-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
18528 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18529 // CHECK18-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18530 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
18531 // CHECK18-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
18532 // CHECK18-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
18533 // CHECK18-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
18534 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18535 // CHECK18:       cond.true:
18536 // CHECK18-NEXT:    br label [[COND_END:%.*]]
18537 // CHECK18:       cond.false:
18538 // CHECK18-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
18539 // CHECK18-NEXT:    br label [[COND_END]]
18540 // CHECK18:       cond.end:
18541 // CHECK18-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
18542 // CHECK18-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
18543 // CHECK18-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
18544 // CHECK18-NEXT:    store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
18545 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18546 // CHECK18:       omp.inner.for.cond:
18547 // CHECK18-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
18548 // CHECK18-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !26
18549 // CHECK18-NEXT:    [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
18550 // CHECK18-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18551 // CHECK18:       omp.inner.for.body:
18552 // CHECK18-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
18553 // CHECK18-NEXT:    [[MUL:%.*]] = mul i64 [[TMP11]], 400
18554 // CHECK18-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
18555 // CHECK18-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !26
18556 // CHECK18-NEXT:    [[TMP12:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !26
18557 // CHECK18-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double
18558 // CHECK18-NEXT:    [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
18559 // CHECK18-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
18560 // CHECK18-NEXT:    store double [[ADD]], double* [[A]], align 8, !llvm.access.group !26
18561 // CHECK18-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
18562 // CHECK18-NEXT:    [[TMP13:%.*]] = load double, double* [[A5]], align 8, !llvm.access.group !26
18563 // CHECK18-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
18564 // CHECK18-NEXT:    store double [[INC]], double* [[A5]], align 8, !llvm.access.group !26
18565 // CHECK18-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
18566 // CHECK18-NEXT:    [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]]
18567 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]]
18568 // CHECK18-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
18569 // CHECK18-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !26
18570 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18571 // CHECK18:       omp.body.continue:
18572 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18573 // CHECK18:       omp.inner.for.inc:
18574 // CHECK18-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
18575 // CHECK18-NEXT:    [[ADD8:%.*]] = add i64 [[TMP15]], 1
18576 // CHECK18-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
18577 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
18578 // CHECK18:       omp.inner.for.end:
18579 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18580 // CHECK18:       omp.loop.exit:
18581 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
18582 // CHECK18-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18583 // CHECK18-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
18584 // CHECK18-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18585 // CHECK18:       .omp.final.then:
18586 // CHECK18-NEXT:    store i64 400, i64* [[IT]], align 8
18587 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18588 // CHECK18:       .omp.final.done:
18589 // CHECK18-NEXT:    ret void
18590 //
18591 //
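// --- Region at _Z9ftemplateIiET_i line 178 (ftemplate<int>) ---
// Entry point for the template instantiation's target region: a (i32) and
// aa (i16) go through i64 casting slots, followed by the array b.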
18592 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
18593 // CHECK18-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
18594 // CHECK18-NEXT:  entry:
18595 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
18596 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
18597 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
18598 // CHECK18-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
18599 // CHECK18-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
18600 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
18601 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
18602 // CHECK18-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
18603 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
18604 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
18605 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
18606 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
18607 // CHECK18-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
18608 // CHECK18-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
18609 // CHECK18-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
18610 // CHECK18-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
18611 // CHECK18-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
18612 // CHECK18-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
18613 // CHECK18-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
18614 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
18615 // CHECK18-NEXT:    ret void
18616 //
18617 //
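// The outlined body for ftemplate<int> uses the signed 64-bit initializer
// (__kmpc_for_static_init_8) over iterations 0..6, with i = -10 + 3*iv; the
// CHECK17 block at the top of this excerpt covers the same function for the
// matching run line. The IR is consistent with roughly (a hedged
// reconstruction):
//
//   #pragma omp parallel for simd
//   for (long long i = -10; i < 10; i += 3) { a += 1; aa += 1; b[2] += 1; }
//
// .omp.final.then stores i = 11, the post-loop value.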
18618 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..6
18619 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
18620 // CHECK18-NEXT:  entry:
18621 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
18622 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
18623 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
18624 // CHECK18-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
18625 // CHECK18-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
18626 // CHECK18-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
18627 // CHECK18-NEXT:    [[TMP:%.*]] = alloca i64, align 8
18628 // CHECK18-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
18629 // CHECK18-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
18630 // CHECK18-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
18631 // CHECK18-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18632 // CHECK18-NEXT:    [[I:%.*]] = alloca i64, align 8
18633 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
18634 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
18635 // CHECK18-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
18636 // CHECK18-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
18637 // CHECK18-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
18638 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
18639 // CHECK18-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
18640 // CHECK18-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
18641 // CHECK18-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
18642 // CHECK18-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
18643 // CHECK18-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
18644 // CHECK18-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18645 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
18646 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
18647 // CHECK18-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
18648 // CHECK18-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
18649 // CHECK18-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
18650 // CHECK18-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18651 // CHECK18:       cond.true:
18652 // CHECK18-NEXT:    br label [[COND_END:%.*]]
18653 // CHECK18:       cond.false:
18654 // CHECK18-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
18655 // CHECK18-NEXT:    br label [[COND_END]]
18656 // CHECK18:       cond.end:
18657 // CHECK18-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
18658 // CHECK18-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
18659 // CHECK18-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
18660 // CHECK18-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
18661 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18662 // CHECK18:       omp.inner.for.cond:
18663 // CHECK18-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
18664 // CHECK18-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !29
18665 // CHECK18-NEXT:    [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
18666 // CHECK18-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18667 // CHECK18:       omp.inner.for.body:
18668 // CHECK18-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
18669 // CHECK18-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
18670 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
18671 // CHECK18-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !29
18672 // CHECK18-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !29
18673 // CHECK18-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
18674 // CHECK18-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !29
18675 // CHECK18-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !29
18676 // CHECK18-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
18677 // CHECK18-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
18678 // CHECK18-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
18679 // CHECK18-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !29
18680 // CHECK18-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
18681 // CHECK18-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !29
18682 // CHECK18-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
18683 // CHECK18-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !29
18684 // CHECK18-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18685 // CHECK18:       omp.body.continue:
18686 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18687 // CHECK18:       omp.inner.for.inc:
18688 // CHECK18-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
18689 // CHECK18-NEXT:    [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
18690 // CHECK18-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !29
18691 // CHECK18-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
18692 // CHECK18:       omp.inner.for.end:
18693 // CHECK18-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18694 // CHECK18:       omp.loop.exit:
18695 // CHECK18-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
18696 // CHECK18-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18697 // CHECK18-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
18698 // CHECK18-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18699 // CHECK18:       .omp.final.then:
18700 // CHECK18-NEXT:    store i64 11, i64* [[I]], align 8
18701 // CHECK18-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18702 // CHECK18:       .omp.final.done:
18703 // CHECK18-NEXT:    ret void
18704 //
18705 //
18706 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
18707 // CHECK19-SAME: () #[[ATTR0:[0-9]+]] {
18708 // CHECK19-NEXT:  entry:
18709 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
18710 // CHECK19-NEXT:    ret void
18711 //
18712 //
18713 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined.
18714 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
18715 // CHECK19-NEXT:  entry:
18716 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18717 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18718 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18719 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
18720 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18721 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18722 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18723 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18724 // CHECK19-NEXT:    [[I:%.*]] = alloca i32, align 4
18725 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18726 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18727 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18728 // CHECK19-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
18729 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18730 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18731 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18732 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
18733 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18734 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18735 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
18736 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18737 // CHECK19:       cond.true:
18738 // CHECK19-NEXT:    br label [[COND_END:%.*]]
18739 // CHECK19:       cond.false:
18740 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18741 // CHECK19-NEXT:    br label [[COND_END]]
18742 // CHECK19:       cond.end:
18743 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
18744 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
18745 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18746 // CHECK19-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
18747 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18748 // CHECK19:       omp.inner.for.cond:
18749 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
18750 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
18751 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
18752 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18753 // CHECK19:       omp.inner.for.body:
18754 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
18755 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
18756 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
18757 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
18758 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18759 // CHECK19:       omp.body.continue:
18760 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18761 // CHECK19:       omp.inner.for.inc:
18762 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
18763 // CHECK19-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
18764 // CHECK19-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
18765 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
18766 // CHECK19:       omp.inner.for.end:
18767 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18768 // CHECK19:       omp.loop.exit:
18769 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
18770 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18771 // CHECK19-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
18772 // CHECK19-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18773 // CHECK19:       .omp.final.then:
18774 // CHECK19-NEXT:    store i32 33, i32* [[I]], align 4
18775 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18776 // CHECK19:       .omp.final.done:
18777 // CHECK19-NEXT:    ret void
18778 //
18779 //
18780 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
18781 // CHECK19-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR0]] {
18782 // CHECK19-NEXT:  entry:
18783 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
18784 // CHECK19-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
18785 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
18786 // CHECK19-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
18787 // CHECK19-NEXT:    [[LIN_CASTED:%.*]] = alloca i32, align 4
18788 // CHECK19-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
18789 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
18790 // CHECK19-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
18791 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
18792 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
18793 // CHECK19-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
18794 // CHECK19-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
18795 // CHECK19-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
18796 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
18797 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
18798 // CHECK19-NEXT:    store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
18799 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
18800 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
18801 // CHECK19-NEXT:    store i32 [[TMP4]], i32* [[A_CASTED]], align 4
18802 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
18803 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
18804 // CHECK19-NEXT:    ret void
18805 //
18806 //
18807 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..1
18808 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR1]] {
18809 // CHECK19-NEXT:  entry:
18810 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18811 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18812 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
18813 // CHECK19-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
18814 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
18815 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
18816 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i64, align 4
18817 // CHECK19-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
18818 // CHECK19-NEXT:    [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
18819 // CHECK19-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
18820 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
18821 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
18822 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
18823 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18824 // CHECK19-NEXT:    [[IT:%.*]] = alloca i64, align 8
18825 // CHECK19-NEXT:    [[LIN2:%.*]] = alloca i32, align 4
18826 // CHECK19-NEXT:    [[A3:%.*]] = alloca i32, align 4
18827 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18828 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18829 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
18830 // CHECK19-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
18831 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
18832 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
18833 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
18834 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
18835 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
18836 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
18837 // CHECK19-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
18838 // CHECK19-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
18839 // CHECK19-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
18840 // CHECK19-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
18841 // CHECK19-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
18842 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18843 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18844 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
18845 // CHECK19-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
18846 // CHECK19-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
18847 // CHECK19-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
18848 // CHECK19-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
18849 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18850 // CHECK19:       cond.true:
18851 // CHECK19-NEXT:    br label [[COND_END:%.*]]
18852 // CHECK19:       cond.false:
18853 // CHECK19-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
18854 // CHECK19-NEXT:    br label [[COND_END]]
18855 // CHECK19:       cond.end:
18856 // CHECK19-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
18857 // CHECK19-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
18858 // CHECK19-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
18859 // CHECK19-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
18860 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18861 // CHECK19:       omp.inner.for.cond:
18862 // CHECK19-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
18863 // CHECK19-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
18864 // CHECK19-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
18865 // CHECK19-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18866 // CHECK19:       omp.inner.for.body:
18867 // CHECK19-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
18868 // CHECK19-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
18869 // CHECK19-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
18870 // CHECK19-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
18871 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !18
18872 // CHECK19-NEXT:    [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
18873 // CHECK19-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
18874 // CHECK19-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18
18875 // CHECK19-NEXT:    [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
18876 // CHECK19-NEXT:    [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
18877 // CHECK19-NEXT:    [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
18878 // CHECK19-NEXT:    store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !18
18879 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !18
18880 // CHECK19-NEXT:    [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
18881 // CHECK19-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
18882 // CHECK19-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18
18883 // CHECK19-NEXT:    [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
18884 // CHECK19-NEXT:    [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
18885 // CHECK19-NEXT:    [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
18886 // CHECK19-NEXT:    store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !18
18887 // CHECK19-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !18
18888 // CHECK19-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
18889 // CHECK19-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
18890 // CHECK19-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
18891 // CHECK19-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !18
18892 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
18893 // CHECK19:       omp.body.continue:
18894 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
18895 // CHECK19:       omp.inner.for.inc:
18896 // CHECK19-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
18897 // CHECK19-NEXT:    [[ADD15:%.*]] = add i64 [[TMP17]], 1
18898 // CHECK19-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
18899 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
18900 // CHECK19:       omp.inner.for.end:
18901 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
18902 // CHECK19:       omp.loop.exit:
18903 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
18904 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18905 // CHECK19-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
18906 // CHECK19-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
18907 // CHECK19:       .omp.final.then:
18908 // CHECK19-NEXT:    store i64 400, i64* [[IT]], align 8
18909 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
18910 // CHECK19:       .omp.final.done:
18911 // CHECK19-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
18912 // CHECK19-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
18913 // CHECK19-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
18914 // CHECK19:       .omp.linear.pu:
18915 // CHECK19-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4
18916 // CHECK19-NEXT:    store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4
18917 // CHECK19-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A3]], align 4
18918 // CHECK19-NEXT:    store i32 [[TMP23]], i32* [[A_ADDR]], align 4
18919 // CHECK19-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
18920 // CHECK19:       .omp.linear.pu.done:
18921 // CHECK19-NEXT:    ret void
18922 //
18923 //
18924 // CHECK19-LABEL: define {{[^@]+}}@_Z7get_valv
18925 // CHECK19-SAME: () #[[ATTR3:[0-9]+]] {
18926 // CHECK19-NEXT:  entry:
18927 // CHECK19-NEXT:    ret i64 0
18928 //
18929 //
18930 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
18931 // CHECK19-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR0]] {
18932 // CHECK19-NEXT:  entry:
18933 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
18934 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
18935 // CHECK19-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
18936 // CHECK19-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
18937 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
18938 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
18939 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
18940 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
18941 // CHECK19-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
18942 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
18943 // CHECK19-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
18944 // CHECK19-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
18945 // CHECK19-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
18946 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
18947 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
18948 // CHECK19-NEXT:    ret void
18949 //
18950 //
18951 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..2
18952 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] {
18953 // CHECK19-NEXT:  entry:
18954 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
18955 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
18956 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
18957 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
18958 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
18959 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i16, align 2
18960 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
18961 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
18962 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
18963 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
18964 // CHECK19-NEXT:    [[IT:%.*]] = alloca i16, align 2
18965 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
18966 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
18967 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
18968 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
18969 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
18970 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
18971 // CHECK19-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
18972 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
18973 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
18974 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
18975 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
18976 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
18977 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18978 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
18979 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
18980 // CHECK19:       cond.true:
18981 // CHECK19-NEXT:    br label [[COND_END:%.*]]
18982 // CHECK19:       cond.false:
18983 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
18984 // CHECK19-NEXT:    br label [[COND_END]]
18985 // CHECK19:       cond.end:
18986 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
18987 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
18988 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
18989 // CHECK19-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
18990 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
18991 // CHECK19:       omp.inner.for.cond:
18992 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
18993 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
18994 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
18995 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
18996 // CHECK19:       omp.inner.for.body:
18997 // CHECK19-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
18998 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
18999 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
19000 // CHECK19-NEXT:    [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
19001 // CHECK19-NEXT:    store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !21
19002 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !21
19003 // CHECK19-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
19004 // CHECK19-NEXT:    store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !21
19005 // CHECK19-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !21
19006 // CHECK19-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
19007 // CHECK19-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
19008 // CHECK19-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
19009 // CHECK19-NEXT:    store i16 [[CONV6]], i16* [[CONV]], align 2, !llvm.access.group !21
19010 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19011 // CHECK19:       omp.body.continue:
19012 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19013 // CHECK19:       omp.inner.for.inc:
19014 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
19015 // CHECK19-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
19016 // CHECK19-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
19017 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
19018 // CHECK19:       omp.inner.for.end:
19019 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19020 // CHECK19:       omp.loop.exit:
19021 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
19022 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19023 // CHECK19-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
19024 // CHECK19-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19025 // CHECK19:       .omp.final.then:
19026 // CHECK19-NEXT:    store i16 22, i16* [[IT]], align 2
19027 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19028 // CHECK19:       .omp.final.done:
19029 // CHECK19-NEXT:    ret void
19030 //
19031 //
19032 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
19033 // CHECK19-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
19034 // CHECK19-NEXT:  entry:
19035 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
19036 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
19037 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
19038 // CHECK19-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
19039 // CHECK19-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
19040 // CHECK19-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
19041 // CHECK19-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
19042 // CHECK19-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
19043 // CHECK19-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
19044 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
19045 // CHECK19-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
19046 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
19047 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
19048 // CHECK19-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
19049 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
19050 // CHECK19-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
19051 // CHECK19-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
19052 // CHECK19-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
19053 // CHECK19-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
19054 // CHECK19-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
19055 // CHECK19-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
19056 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19057 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
19058 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
19059 // CHECK19-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
19060 // CHECK19-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
19061 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
19062 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
19063 // CHECK19-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
19064 // CHECK19-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
19065 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
19066 // CHECK19-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
19067 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
19068 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19069 // CHECK19-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
19070 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
19071 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
19072 // CHECK19-NEXT:    ret void
19073 //
19074 //
19075 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..3
19076 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
19077 // CHECK19-NEXT:  entry:
19078 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19079 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19080 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
19081 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
19082 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
19083 // CHECK19-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
19084 // CHECK19-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
19085 // CHECK19-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
19086 // CHECK19-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
19087 // CHECK19-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
19088 // CHECK19-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
19089 // CHECK19-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
19090 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19091 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i8, align 1
19092 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19093 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19094 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19095 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19096 // CHECK19-NEXT:    [[IT:%.*]] = alloca i8, align 1
19097 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19098 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19099 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
19100 // CHECK19-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
19101 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
19102 // CHECK19-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
19103 // CHECK19-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
19104 // CHECK19-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
19105 // CHECK19-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
19106 // CHECK19-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
19107 // CHECK19-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
19108 // CHECK19-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19109 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
19110 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
19111 // CHECK19-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
19112 // CHECK19-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
19113 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
19114 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
19115 // CHECK19-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
19116 // CHECK19-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
19117 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19118 // CHECK19-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
19119 // CHECK19-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19120 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19121 // CHECK19-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19122 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19123 // CHECK19-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
19124 // CHECK19-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
19125 // CHECK19-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
19126 // CHECK19:       omp.dispatch.cond:
19127 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19128 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
19129 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19130 // CHECK19:       cond.true:
19131 // CHECK19-NEXT:    br label [[COND_END:%.*]]
19132 // CHECK19:       cond.false:
19133 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19134 // CHECK19-NEXT:    br label [[COND_END]]
19135 // CHECK19:       cond.end:
19136 // CHECK19-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
19137 // CHECK19-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19138 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19139 // CHECK19-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
19140 // CHECK19-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19141 // CHECK19-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19142 // CHECK19-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
19143 // CHECK19-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
19144 // CHECK19:       omp.dispatch.body:
19145 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19146 // CHECK19:       omp.inner.for.cond:
19147 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
19148 // CHECK19-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
19149 // CHECK19-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
19150 // CHECK19-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19151 // CHECK19:       omp.inner.for.body:
19152 // CHECK19-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
19153 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
19154 // CHECK19-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
19155 // CHECK19-NEXT:    [[CONV:%.*]] = trunc i32 [[SUB]] to i8
19156 // CHECK19-NEXT:    store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !24
19157 // CHECK19-NEXT:    [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !24
19158 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
19159 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group !24
19160 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
19161 // CHECK19-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !24
19162 // CHECK19-NEXT:    [[CONV7:%.*]] = fpext float [[TMP20]] to double
19163 // CHECK19-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
19164 // CHECK19-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
19165 // CHECK19-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !24
19166 // CHECK19-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
19167 // CHECK19-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
19168 // CHECK19-NEXT:    [[CONV11:%.*]] = fpext float [[TMP21]] to double
19169 // CHECK19-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
19170 // CHECK19-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
19171 // CHECK19-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
19172 // CHECK19-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
19173 // CHECK19-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
19174 // CHECK19-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
19175 // CHECK19-NEXT:    [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
19176 // CHECK19-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
19177 // CHECK19-NEXT:    [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
19178 // CHECK19-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
19179 // CHECK19-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
19180 // CHECK19-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
19181 // CHECK19-NEXT:    [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
19182 // CHECK19-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
19183 // CHECK19-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
19184 // CHECK19-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !24
19185 // CHECK19-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
19186 // CHECK19-NEXT:    store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !24
19187 // CHECK19-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
19188 // CHECK19-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !24
19189 // CHECK19-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
19190 // CHECK19-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
19191 // CHECK19-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
19192 // CHECK19-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !24
19193 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19194 // CHECK19:       omp.body.continue:
19195 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19196 // CHECK19:       omp.inner.for.inc:
19197 // CHECK19-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
19198 // CHECK19-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
19199 // CHECK19-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
19200 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
19201 // CHECK19:       omp.inner.for.end:
19202 // CHECK19-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
19203 // CHECK19:       omp.dispatch.inc:
19204 // CHECK19-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19205 // CHECK19-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19206 // CHECK19-NEXT:    [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
19207 // CHECK19-NEXT:    store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
19208 // CHECK19-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19209 // CHECK19-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
19210 // CHECK19-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
19211 // CHECK19-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
19212 // CHECK19-NEXT:    br label [[OMP_DISPATCH_COND]]
19213 // CHECK19:       omp.dispatch.end:
19214 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
19215 // CHECK19-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19216 // CHECK19-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
19217 // CHECK19-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19218 // CHECK19:       .omp.final.then:
19219 // CHECK19-NEXT:    store i8 96, i8* [[IT]], align 1
19220 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19221 // CHECK19:       .omp.final.done:
19222 // CHECK19-NEXT:    ret void
19223 //
19224 //
19225 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
19226 // CHECK19-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
19227 // CHECK19-NEXT:  entry:
19228 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
19229 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
19230 // CHECK19-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
19231 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
19232 // CHECK19-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
19233 // CHECK19-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
19234 // CHECK19-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
19235 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
19236 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
19237 // CHECK19-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
19238 // CHECK19-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
19239 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
19240 // CHECK19-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
19241 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
19242 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
19243 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
19244 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
19245 // CHECK19-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
19246 // CHECK19-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
19247 // CHECK19-NEXT:    store i16 [[TMP3]], i16* [[CONV2]], align 2
19248 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
19249 // CHECK19-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 1
19250 // CHECK19-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
19251 // CHECK19-NEXT:    store i8 [[TMP5]], i8* [[CONV3]], align 1
19252 // CHECK19-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
19253 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
19254 // CHECK19-NEXT:    ret void
19255 //
19256 //
19257 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..4
19258 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
19259 // CHECK19-NEXT:  entry:
19260 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19261 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19262 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
19263 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
19264 // CHECK19-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
19265 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
19266 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19267 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19268 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19269 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19270 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
19271 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
19272 // CHECK19-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
19273 // CHECK19-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
19274 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
19275 // CHECK19-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
19276 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
19277 // CHECK19-NEXT:    ret void
19278 //
19279 //
19280 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
19281 // CHECK19-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
19282 // CHECK19-NEXT:  entry:
19283 // CHECK19-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
19284 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
19285 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
19286 // CHECK19-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
19287 // CHECK19-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
19288 // CHECK19-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
19289 // CHECK19-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
19290 // CHECK19-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
19291 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
19292 // CHECK19-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
19293 // CHECK19-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
19294 // CHECK19-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
19295 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
19296 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
19297 // CHECK19-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
19298 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
19299 // CHECK19-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
19300 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
19301 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
19302 // CHECK19-NEXT:    ret void
19303 //
19304 //
19305 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..5
19306 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] {
19307 // CHECK19-NEXT:  entry:
19308 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19309 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19310 // CHECK19-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
19311 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
19312 // CHECK19-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
19313 // CHECK19-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
19314 // CHECK19-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
19315 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
19316 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i64, align 4
19317 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
19318 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
19319 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
19320 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19321 // CHECK19-NEXT:    [[IT:%.*]] = alloca i64, align 8
19322 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19323 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19324 // CHECK19-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
19325 // CHECK19-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
19326 // CHECK19-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
19327 // CHECK19-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
19328 // CHECK19-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
19329 // CHECK19-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
19330 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
19331 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
19332 // CHECK19-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
19333 // CHECK19-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
19334 // CHECK19-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
19335 // CHECK19-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
19336 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19337 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19338 // CHECK19-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
19339 // CHECK19-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
19340 // CHECK19-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
19341 // CHECK19-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
19342 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19343 // CHECK19:       cond.true:
19344 // CHECK19-NEXT:    br label [[COND_END:%.*]]
19345 // CHECK19:       cond.false:
19346 // CHECK19-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
19347 // CHECK19-NEXT:    br label [[COND_END]]
19348 // CHECK19:       cond.end:
19349 // CHECK19-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
19350 // CHECK19-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
19351 // CHECK19-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
19352 // CHECK19-NEXT:    store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
19353 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19354 // CHECK19:       omp.inner.for.cond:
19355 // CHECK19-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
19356 // CHECK19-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !27
19357 // CHECK19-NEXT:    [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
19358 // CHECK19-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19359 // CHECK19:       omp.inner.for.body:
19360 // CHECK19-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
19361 // CHECK19-NEXT:    [[MUL:%.*]] = mul i64 [[TMP11]], 400
19362 // CHECK19-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
19363 // CHECK19-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !27
19364 // CHECK19-NEXT:    [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !27
19365 // CHECK19-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
19366 // CHECK19-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
19367 // CHECK19-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
19368 // CHECK19-NEXT:    store double [[ADD]], double* [[A]], align 4, !llvm.access.group !27
19369 // CHECK19-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
19370 // CHECK19-NEXT:    [[TMP13:%.*]] = load double, double* [[A4]], align 4, !llvm.access.group !27
19371 // CHECK19-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
19372 // CHECK19-NEXT:    store double [[INC]], double* [[A4]], align 4, !llvm.access.group !27
19373 // CHECK19-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
19374 // CHECK19-NEXT:    [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
19375 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
19376 // CHECK19-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
19377 // CHECK19-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !27
19378 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19379 // CHECK19:       omp.body.continue:
19380 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19381 // CHECK19:       omp.inner.for.inc:
19382 // CHECK19-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
19383 // CHECK19-NEXT:    [[ADD7:%.*]] = add i64 [[TMP15]], 1
19384 // CHECK19-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
19385 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
19386 // CHECK19:       omp.inner.for.end:
19387 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19388 // CHECK19:       omp.loop.exit:
19389 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
19390 // CHECK19-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19391 // CHECK19-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
19392 // CHECK19-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19393 // CHECK19:       .omp.final.then:
19394 // CHECK19-NEXT:    store i64 400, i64* [[IT]], align 8
19395 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19396 // CHECK19:       .omp.final.done:
19397 // CHECK19-NEXT:    ret void
19398 //
19399 //
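// The CHECK19 block above covers the outlined parallel body for the S1::r1
// member-function target region on i386: a statically scheduled (schedule
// kind 34) unsigned 64-bit loop whose upper bound is clamped to 3 before the
// inner simd loop runs. A minimal sketch of the kind of construct that yields
// this shape (an illustrative assumption, not the verbatim test source):
//
//   struct S1 {
//     double a;
//     int r1(int n) {
//       int b = n + 1;
//       short c[2][n];                 // the two VLA bounds passed in
//   #pragma omp target parallel for simd
//       for (unsigned long long it = 2000; it >= 600; it -= 400) {
//         this->a = (double)b + 1.5;   // sitofp + fadd, store to A
//         c[1][1] = ++this->a;         // fadd 1.0, fptosi, i16 store
//       }
//       return (int)a;
//     }
//   };
//
// The iterations it = 2000, 1600, 1200, 800 give the 0..3 iteration space
// (hence the clamp against 3), and .omp.final.then stores the post-loop
// value 400 into IT.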
19400 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
19401 // CHECK19-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
19402 // CHECK19-NEXT:  entry:
19403 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
19404 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
19405 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
19406 // CHECK19-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
19407 // CHECK19-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
19408 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
19409 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
19410 // CHECK19-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
19411 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
19412 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
19413 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
19414 // CHECK19-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
19415 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
19416 // CHECK19-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
19417 // CHECK19-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
19418 // CHECK19-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
19419 // CHECK19-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
19420 // CHECK19-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
19421 // CHECK19-NEXT:    ret void
19422 //
19423 //
19424 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..6
19425 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
19426 // CHECK19-NEXT:  entry:
19427 // CHECK19-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19428 // CHECK19-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19429 // CHECK19-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
19430 // CHECK19-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
19431 // CHECK19-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
19432 // CHECK19-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
19433 // CHECK19-NEXT:    [[TMP:%.*]] = alloca i64, align 4
19434 // CHECK19-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
19435 // CHECK19-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
19436 // CHECK19-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
19437 // CHECK19-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19438 // CHECK19-NEXT:    [[I:%.*]] = alloca i64, align 8
19439 // CHECK19-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19440 // CHECK19-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19441 // CHECK19-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
19442 // CHECK19-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
19443 // CHECK19-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
19444 // CHECK19-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
19445 // CHECK19-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
19446 // CHECK19-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
19447 // CHECK19-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
19448 // CHECK19-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
19449 // CHECK19-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19450 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19451 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
19452 // CHECK19-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
19453 // CHECK19-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
19454 // CHECK19-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
19455 // CHECK19-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19456 // CHECK19:       cond.true:
19457 // CHECK19-NEXT:    br label [[COND_END:%.*]]
19458 // CHECK19:       cond.false:
19459 // CHECK19-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
19460 // CHECK19-NEXT:    br label [[COND_END]]
19461 // CHECK19:       cond.end:
19462 // CHECK19-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
19463 // CHECK19-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
19464 // CHECK19-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
19465 // CHECK19-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
19466 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19467 // CHECK19:       omp.inner.for.cond:
19468 // CHECK19-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
19469 // CHECK19-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !30
19470 // CHECK19-NEXT:    [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
19471 // CHECK19-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19472 // CHECK19:       omp.inner.for.body:
19473 // CHECK19-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
19474 // CHECK19-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
19475 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
19476 // CHECK19-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !30
19477 // CHECK19-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !30
19478 // CHECK19-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
19479 // CHECK19-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !30
19480 // CHECK19-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !30
19481 // CHECK19-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
19482 // CHECK19-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
19483 // CHECK19-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
19484 // CHECK19-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !30
19485 // CHECK19-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
19486 // CHECK19-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !30
19487 // CHECK19-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
19488 // CHECK19-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !30
19489 // CHECK19-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19490 // CHECK19:       omp.body.continue:
19491 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19492 // CHECK19:       omp.inner.for.inc:
19493 // CHECK19-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
19494 // CHECK19-NEXT:    [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
19495 // CHECK19-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
19496 // CHECK19-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
19497 // CHECK19:       omp.inner.for.end:
19498 // CHECK19-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19499 // CHECK19:       omp.loop.exit:
19500 // CHECK19-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
19501 // CHECK19-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19502 // CHECK19-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
19503 // CHECK19-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19504 // CHECK19:       .omp.final.then:
19505 // CHECK19-NEXT:    store i64 11, i64* [[I]], align 8
19506 // CHECK19-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19507 // CHECK19:       .omp.final.done:
19508 // CHECK19-NEXT:    ret void
19509 //
19510 //
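// .omp_outlined..6 above is the body behind the ftemplate offload entry at
// l178: the entry re-packs the scalars through A_CASTED/AA_CASTED and
// forwards three captures via __kmpc_fork_call. The loop itself uses a
// signed 64-bit induction with seven iterations (upper bound clamped to 6).
// A sketch consistent with those checks (assumed for illustration):
//
//   template <typename tx>
//   tx ftemplate(int n) {
//     tx a = 0;
//     short aa = 0;
//     tx b[10];
//   #pragma omp target parallel for simd
//     for (long long i = -10; i < 10; i += 3) {
//       a += 1;     // the add on A_ADDR
//       aa += 1;    // sext/add/trunc through the i16 view of AA_ADDR
//       b[2] += 1;  // the arrayidx at element 2
//     }
//     return a;
//   }
//
// Trip count ceil((10 - (-10)) / 3) = 7 matches UB = 6, and the final value
// -10 + 7 * 3 = 11 matches the store of 11 into I in .omp.final.then.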
19511 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
19512 // CHECK20-SAME: () #[[ATTR0:[0-9]+]] {
19513 // CHECK20-NEXT:  entry:
19514 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
19515 // CHECK20-NEXT:    ret void
19516 //
19517 //
19518 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined.
19519 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
19520 // CHECK20-NEXT:  entry:
19521 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19522 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19523 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19524 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
19525 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19526 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19527 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19528 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19529 // CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
19530 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19531 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19532 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19533 // CHECK20-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
19534 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19535 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19536 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19537 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
19538 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19539 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19540 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
19541 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19542 // CHECK20:       cond.true:
19543 // CHECK20-NEXT:    br label [[COND_END:%.*]]
19544 // CHECK20:       cond.false:
19545 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19546 // CHECK20-NEXT:    br label [[COND_END]]
19547 // CHECK20:       cond.end:
19548 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
19549 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19550 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19551 // CHECK20-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
19552 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19553 // CHECK20:       omp.inner.for.cond:
19554 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
19555 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
19556 // CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
19557 // CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19558 // CHECK20:       omp.inner.for.body:
19559 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
19560 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
19561 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
19562 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
19563 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19564 // CHECK20:       omp.body.continue:
19565 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19566 // CHECK20:       omp.inner.for.inc:
19567 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
19568 // CHECK20-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
19569 // CHECK20-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
19570 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
19571 // CHECK20:       omp.inner.for.end:
19572 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19573 // CHECK20:       omp.loop.exit:
19574 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
19575 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19576 // CHECK20-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
19577 // CHECK20-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19578 // CHECK20:       .omp.final.then:
19579 // CHECK20-NEXT:    store i32 33, i32* [[I]], align 4
19580 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19581 // CHECK20:       .omp.final.done:
19582 // CHECK20-NEXT:    ret void
19583 //
19584 //
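// The .omp_outlined. region above captures nothing, which is why the l96
// offload stub calls __kmpc_fork_call with a variadic argument count of 0.
// The checked bounds (UB clamped to 5, i computed as 3 + 5 * iv, final value
// 33) are consistent with a six-iteration loop such as this (an assumed
// sketch, not necessarily the exact test source):
//
//   #pragma omp target parallel for simd
//   for (int i = 3; i < 32; i += 5)
//     ;
//
// ceil((32 - 3) / 5) = 6 iterations give IV 0..5, and 3 + 6 * 5 = 33 is the
// value stored into I after the loop.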
19585 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
19586 // CHECK20-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR0]] {
19587 // CHECK20-NEXT:  entry:
19588 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
19589 // CHECK20-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
19590 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
19591 // CHECK20-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
19592 // CHECK20-NEXT:    [[LIN_CASTED:%.*]] = alloca i32, align 4
19593 // CHECK20-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
19594 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
19595 // CHECK20-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
19596 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
19597 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
19598 // CHECK20-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
19599 // CHECK20-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
19600 // CHECK20-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
19601 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
19602 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
19603 // CHECK20-NEXT:    store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
19604 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
19605 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
19606 // CHECK20-NEXT:    store i32 [[TMP4]], i32* [[A_CASTED]], align 4
19607 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
19608 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
19609 // CHECK20-NEXT:    ret void
19610 //
19611 //
19612 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..1
19613 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR1]] {
19614 // CHECK20-NEXT:  entry:
19615 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19616 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19617 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
19618 // CHECK20-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
19619 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
19620 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
19621 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i64, align 4
19622 // CHECK20-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
19623 // CHECK20-NEXT:    [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
19624 // CHECK20-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
19625 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
19626 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
19627 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
19628 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19629 // CHECK20-NEXT:    [[IT:%.*]] = alloca i64, align 8
19630 // CHECK20-NEXT:    [[LIN2:%.*]] = alloca i32, align 4
19631 // CHECK20-NEXT:    [[A3:%.*]] = alloca i32, align 4
19632 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19633 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19634 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
19635 // CHECK20-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
19636 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
19637 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
19638 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
19639 // CHECK20-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
19640 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
19641 // CHECK20-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
19642 // CHECK20-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
19643 // CHECK20-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
19644 // CHECK20-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
19645 // CHECK20-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
19646 // CHECK20-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
19647 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19648 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19649 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
19650 // CHECK20-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
19651 // CHECK20-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
19652 // CHECK20-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
19653 // CHECK20-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
19654 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19655 // CHECK20:       cond.true:
19656 // CHECK20-NEXT:    br label [[COND_END:%.*]]
19657 // CHECK20:       cond.false:
19658 // CHECK20-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
19659 // CHECK20-NEXT:    br label [[COND_END]]
19660 // CHECK20:       cond.end:
19661 // CHECK20-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
19662 // CHECK20-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
19663 // CHECK20-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
19664 // CHECK20-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
19665 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19666 // CHECK20:       omp.inner.for.cond:
19667 // CHECK20-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
19668 // CHECK20-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
19669 // CHECK20-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
19670 // CHECK20-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19671 // CHECK20:       omp.inner.for.body:
19672 // CHECK20-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
19673 // CHECK20-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
19674 // CHECK20-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
19675 // CHECK20-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
19676 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !18
19677 // CHECK20-NEXT:    [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
19678 // CHECK20-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
19679 // CHECK20-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18
19680 // CHECK20-NEXT:    [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
19681 // CHECK20-NEXT:    [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
19682 // CHECK20-NEXT:    [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
19683 // CHECK20-NEXT:    store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !18
19684 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !18
19685 // CHECK20-NEXT:    [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
19686 // CHECK20-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
19687 // CHECK20-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18
19688 // CHECK20-NEXT:    [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
19689 // CHECK20-NEXT:    [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
19690 // CHECK20-NEXT:    [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
19691 // CHECK20-NEXT:    store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !18
19692 // CHECK20-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !18
19693 // CHECK20-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
19694 // CHECK20-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
19695 // CHECK20-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
19696 // CHECK20-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !18
19697 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19698 // CHECK20:       omp.body.continue:
19699 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19700 // CHECK20:       omp.inner.for.inc:
19701 // CHECK20-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
19702 // CHECK20-NEXT:    [[ADD15:%.*]] = add i64 [[TMP17]], 1
19703 // CHECK20-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
19704 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
19705 // CHECK20:       omp.inner.for.end:
19706 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19707 // CHECK20:       omp.loop.exit:
19708 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
19709 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19710 // CHECK20-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
19711 // CHECK20-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19712 // CHECK20:       .omp.final.then:
19713 // CHECK20-NEXT:    store i64 400, i64* [[IT]], align 8
19714 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19715 // CHECK20:       .omp.final.done:
19716 // CHECK20-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19717 // CHECK20-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
19718 // CHECK20-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
19719 // CHECK20:       .omp.linear.pu:
19720 // CHECK20-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4
19721 // CHECK20-NEXT:    store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4
19722 // CHECK20-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A3]], align 4
19723 // CHECK20-NEXT:    store i32 [[TMP23]], i32* [[A_ADDR]], align 4
19724 // CHECK20-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
19725 // CHECK20:       .omp.linear.pu.done:
19726 // CHECK20-NEXT:    ret void
19727 //
19728 //
19729 // CHECK20-LABEL: define {{[^@]+}}@_Z7get_valv
19730 // CHECK20-SAME: () #[[ATTR3:[0-9]+]] {
19731 // CHECK20-NEXT:  entry:
19732 // CHECK20-NEXT:    ret i64 0
19733 //
19734 //
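// .omp_outlined..1 above checks the linear-clause lowering: the incoming
// values of the linear variables are saved into DOTLINEAR_START and
// DOTLINEAR_START1, the step comes from the get_val() call captured in
// DOTLINEAR_STEP, and a __kmpc_barrier synchronizes the threads before the
// workshared loop is initialized (clang emits this to avoid races on
// linear/firstprivate initialization). Inside the loop each thread computes
// its private copies as start + iv * step (LIN2 and A3); after the loop the
// .omp.linear.pu block copies the privates back to LIN_ADDR and A_ADDR, but
// only in the thread for which DOTOMP_IS_LAST is set. A sketch of a matching
// construct (assumed for illustration):
//
//   short aa = 0;
//   int lin = 12, a = 0;
//   #pragma omp target parallel for simd linear(lin, a : get_val())
//   for (unsigned long long it = 2000; it >= 600; it -= 400)
//     aa += 1;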
19735 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
19736 // CHECK20-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR0]] {
19737 // CHECK20-NEXT:  entry:
19738 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
19739 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
19740 // CHECK20-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
19741 // CHECK20-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
19742 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
19743 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
19744 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
19745 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
19746 // CHECK20-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
19747 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
19748 // CHECK20-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
19749 // CHECK20-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
19750 // CHECK20-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
19751 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
19752 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
19753 // CHECK20-NEXT:    ret void
19754 //
19755 //
19756 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..2
19757 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] {
19758 // CHECK20-NEXT:  entry:
19759 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19760 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19761 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
19762 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
19763 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19764 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i16, align 2
19765 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19766 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19767 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19768 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19769 // CHECK20-NEXT:    [[IT:%.*]] = alloca i16, align 2
19770 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19771 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19772 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
19773 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
19774 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
19775 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19776 // CHECK20-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
19777 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19778 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19779 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19780 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
19781 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
19782 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19783 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
19784 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19785 // CHECK20:       cond.true:
19786 // CHECK20-NEXT:    br label [[COND_END:%.*]]
19787 // CHECK20:       cond.false:
19788 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19789 // CHECK20-NEXT:    br label [[COND_END]]
19790 // CHECK20:       cond.end:
19791 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
19792 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19793 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19794 // CHECK20-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
19795 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19796 // CHECK20:       omp.inner.for.cond:
19797 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
19798 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
19799 // CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
19800 // CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19801 // CHECK20:       omp.inner.for.body:
19802 // CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
19803 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
19804 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
19805 // CHECK20-NEXT:    [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
19806 // CHECK20-NEXT:    store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !21
19807 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !21
19808 // CHECK20-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
19809 // CHECK20-NEXT:    store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !21
19810 // CHECK20-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !21
19811 // CHECK20-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
19812 // CHECK20-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
19813 // CHECK20-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
19814 // CHECK20-NEXT:    store i16 [[CONV6]], i16* [[CONV]], align 2, !llvm.access.group !21
19815 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19816 // CHECK20:       omp.body.continue:
19817 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
19818 // CHECK20:       omp.inner.for.inc:
19819 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
19820 // CHECK20-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
19821 // CHECK20-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
19822 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
19823 // CHECK20:       omp.inner.for.end:
19824 // CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
19825 // CHECK20:       omp.loop.exit:
19826 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
19827 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
19828 // CHECK20-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
19829 // CHECK20-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
19830 // CHECK20:       .omp.final.then:
19831 // CHECK20-NEXT:    store i16 22, i16* [[IT]], align 2
19832 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
19833 // CHECK20:       .omp.final.done:
19834 // CHECK20-NEXT:    ret void
19835 //
19836 //
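// .omp_outlined..2 above uses a 16-bit induction variable: the IV stays a
// 32-bit counter and each iteration materializes it = trunc(6 + 4 * iv) into
// the i16 slot IT. Four iterations (UB clamped to 3) and a final store of 22
// fit a loop like this (an assumed sketch):
//
//   #pragma omp target parallel for simd
//   for (short it = 6; it <= 20; it += 4) {
//     a += 1;
//     aa += 1;
//   }
//
// it takes 6, 10, 14, 18 inside the loop, and 6 + 4 * 4 = 22 is the
// past-the-end value stored in .omp.final.then.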
19837 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
19838 // CHECK20-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
19839 // CHECK20-NEXT:  entry:
19840 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
19841 // CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
19842 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
19843 // CHECK20-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
19844 // CHECK20-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
19845 // CHECK20-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
19846 // CHECK20-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
19847 // CHECK20-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
19848 // CHECK20-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
19849 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
19850 // CHECK20-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
19851 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
19852 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
19853 // CHECK20-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
19854 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
19855 // CHECK20-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
19856 // CHECK20-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
19857 // CHECK20-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
19858 // CHECK20-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
19859 // CHECK20-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
19860 // CHECK20-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
19861 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19862 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
19863 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
19864 // CHECK20-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
19865 // CHECK20-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
19866 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
19867 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
19868 // CHECK20-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
19869 // CHECK20-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
19870 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
19871 // CHECK20-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
19872 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
19873 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19874 // CHECK20-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
19875 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
19876 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
19877 // CHECK20-NEXT:    ret void
19878 //
19879 //
19880 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..3
19881 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
19882 // CHECK20-NEXT:  entry:
19883 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
19884 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
19885 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
19886 // CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
19887 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
19888 // CHECK20-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
19889 // CHECK20-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
19890 // CHECK20-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
19891 // CHECK20-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
19892 // CHECK20-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
19893 // CHECK20-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
19894 // CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
19895 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
19896 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i8, align 1
19897 // CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
19898 // CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
19899 // CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
19900 // CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
19901 // CHECK20-NEXT:    [[IT:%.*]] = alloca i8, align 1
19902 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
19903 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
19904 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
19905 // CHECK20-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
19906 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
19907 // CHECK20-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
19908 // CHECK20-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
19909 // CHECK20-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
19910 // CHECK20-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
19911 // CHECK20-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
19912 // CHECK20-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
19913 // CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19914 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
19915 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
19916 // CHECK20-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
19917 // CHECK20-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
19918 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
19919 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
19920 // CHECK20-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
19921 // CHECK20-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
19922 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
19923 // CHECK20-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
19924 // CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
19925 // CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
19926 // CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
19927 // CHECK20-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
19928 // CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
19929 // CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
19930 // CHECK20-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
19931 // CHECK20:       omp.dispatch.cond:
19932 // CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19933 // CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
19934 // CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
19935 // CHECK20:       cond.true:
19936 // CHECK20-NEXT:    br label [[COND_END:%.*]]
19937 // CHECK20:       cond.false:
19938 // CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19939 // CHECK20-NEXT:    br label [[COND_END]]
19940 // CHECK20:       cond.end:
19941 // CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
19942 // CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
19943 // CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
19944 // CHECK20-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
19945 // CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
19946 // CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
19947 // CHECK20-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
19948 // CHECK20-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
19949 // CHECK20:       omp.dispatch.body:
19950 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
19951 // CHECK20:       omp.inner.for.cond:
19952 // CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
19953 // CHECK20-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
19954 // CHECK20-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
19955 // CHECK20-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
19956 // CHECK20:       omp.inner.for.body:
19957 // CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
19958 // CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
19959 // CHECK20-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
19960 // CHECK20-NEXT:    [[CONV:%.*]] = trunc i32 [[SUB]] to i8
19961 // CHECK20-NEXT:    store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !24
19962 // CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !24
19963 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
19964 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group !24
19965 // CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
19966 // CHECK20-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !24
19967 // CHECK20-NEXT:    [[CONV7:%.*]] = fpext float [[TMP20]] to double
19968 // CHECK20-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
19969 // CHECK20-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
19970 // CHECK20-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !24
19971 // CHECK20-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
19972 // CHECK20-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
19973 // CHECK20-NEXT:    [[CONV11:%.*]] = fpext float [[TMP21]] to double
19974 // CHECK20-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
19975 // CHECK20-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
19976 // CHECK20-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
19977 // CHECK20-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
19978 // CHECK20-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
19979 // CHECK20-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
19980 // CHECK20-NEXT:    [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
19981 // CHECK20-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
19982 // CHECK20-NEXT:    [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
19983 // CHECK20-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
19984 // CHECK20-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
19985 // CHECK20-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
19986 // CHECK20-NEXT:    [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
19987 // CHECK20-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
19988 // CHECK20-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
19989 // CHECK20-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !24
19990 // CHECK20-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
19991 // CHECK20-NEXT:    store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !24
19992 // CHECK20-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
19993 // CHECK20-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !24
19994 // CHECK20-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
19995 // CHECK20-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
19996 // CHECK20-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
19997 // CHECK20-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !24
19998 // CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
19999 // CHECK20:       omp.body.continue:
20000 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
20001 // CHECK20:       omp.inner.for.inc:
20002 // CHECK20-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
20003 // CHECK20-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
20004 // CHECK20-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
20005 // CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
20006 // CHECK20:       omp.inner.for.end:
20007 // CHECK20-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
20008 // CHECK20:       omp.dispatch.inc:
20009 // CHECK20-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
20010 // CHECK20-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20011 // CHECK20-NEXT:    [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
20012 // CHECK20-NEXT:    store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
20013 // CHECK20-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
20014 // CHECK20-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
20015 // CHECK20-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
20016 // CHECK20-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
20017 // CHECK20-NEXT:    br label [[OMP_DISPATCH_COND]]
20018 // CHECK20:       omp.dispatch.end:
20019 // CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
20020 // CHECK20-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
20021 // CHECK20-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
20022 // CHECK20-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
20023 // CHECK20:       .omp.final.then:
20024 // CHECK20-NEXT:    store i8 96, i8* [[IT]], align 1
20025 // CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
20026 // CHECK20:       .omp.final.done:
20027 // CHECK20-NEXT:    ret void
20028 //
20029 //
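// .omp_outlined..3 above differs from the earlier bodies in two ways. First,
// __kmpc_for_static_init_4 is called with schedule kind 33 (static, chunked)
// and a runtime chunk size loaded from the captured expression, so the code
// wraps the simd loop in an omp.dispatch.cond/omp.dispatch.inc loop that
// advances LB and UB by the stride until the thread's chunks run out.
// Second, the induction variable is an i8: it = trunc(122 - iv), with 26
// iterations (UB clamped to 25) and a final store of 96. A sketch consistent
// with that (assumed for illustration, with n standing in for the chunk
// expression):
//
//   #pragma omp target parallel for simd schedule(static, n)
//   for (char it = 122; it >= 97; it--) {
//     a += 1;
//     b[2] += 1.0;   // plus the other array/struct updates checked above
//   }
//
// 122 down to 97 is 26 iterations, and 122 - 26 = 96 is the post-loop value
// of it.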
20030 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
20031 // CHECK20-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
20032 // CHECK20-NEXT:  entry:
20033 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
20034 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
20035 // CHECK20-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
20036 // CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
20037 // CHECK20-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
20038 // CHECK20-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
20039 // CHECK20-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
20040 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
20041 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
20042 // CHECK20-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
20043 // CHECK20-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
20044 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
20045 // CHECK20-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
20046 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
20047 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
20048 // CHECK20-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
20049 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
20050 // CHECK20-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
20051 // CHECK20-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
20052 // CHECK20-NEXT:    store i16 [[TMP3]], i16* [[CONV2]], align 2
20053 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
20054 // CHECK20-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 1
20055 // CHECK20-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
20056 // CHECK20-NEXT:    store i8 [[TMP5]], i8* [[CONV3]], align 1
20057 // CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
20058 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
20059 // CHECK20-NEXT:    ret void
20060 //
20061 //
20062 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..4
20063 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
20064 // CHECK20-NEXT:  entry:
20065 // CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
20066 // CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
20067 // CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
20068 // CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
20069 // CHECK20-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
20070 // CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
20071 // CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20072 // CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20073 // CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
20074 // CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
20075 // CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
20076 // CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
20077 // CHECK20-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
20078 // CHECK20-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
20079 // CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
20080 // CHECK20-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
20081 // CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
20082 // CHECK20-NEXT:    ret void
20083 //
20084 //
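// Note that .omp_outlined..4 above spills its parameters and returns without
// emitting any loop at all. That is what clang produces when the associated
// loop's trip count folds to zero at compile time, for example (an assumed
// sketch, not the verbatim source):
//
//   #pragma omp target parallel for simd
//   for (unsigned i = 100; i < 10; i++) {
//     a += 1;
//   }
//
// With constant bounds, 100 < 10 is false on entry, so only the captures of
// a, aa, aaa, and b survive into the outlined function.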
20085 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
20086 // CHECK20-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
20087 // CHECK20-NEXT:  entry:
20088 // CHECK20-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
20089 // CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
20090 // CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
20091 // CHECK20-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
20092 // CHECK20-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
20093 // CHECK20-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
20094 // CHECK20-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
20095 // CHECK20-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
20096 // CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
20097 // CHECK20-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
20098 // CHECK20-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
20099 // CHECK20-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
20100 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
20101 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
20102 // CHECK20-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
20103 // CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
20104 // CHECK20-NEXT:    store i32 [[TMP4]], i32* [[B_CASTED]], align 4
20105 // CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
20106 // CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
20107 // CHECK20-NEXT:    ret void
20108 //
20109 //
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK20-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK20-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK20-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK20-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK20-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20:       cond.true:
// CHECK20-NEXT:    br label [[COND_END:%.*]]
// CHECK20:       cond.false:
// CHECK20-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT:    br label [[COND_END]]
// CHECK20:       cond.end:
// CHECK20-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK20-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK20-NEXT:    store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20:       omp.inner.for.cond:
// CHECK20-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK20-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !27
// CHECK20-NEXT:    [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK20-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20:       omp.inner.for.body:
// CHECK20-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK20-NEXT:    [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK20-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK20-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !27
// CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !27
// CHECK20-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK20-NEXT:    [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK20-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK20-NEXT:    store double [[ADD]], double* [[A]], align 4, !llvm.access.group !27
// CHECK20-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP13:%.*]] = load double, double* [[A4]], align 4, !llvm.access.group !27
// CHECK20-NEXT:    [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK20-NEXT:    store double [[INC]], double* [[A4]], align 4, !llvm.access.group !27
// CHECK20-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK20-NEXT:    [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
// CHECK20-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK20-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !27
// CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20:       omp.body.continue:
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20:       omp.inner.for.inc:
// CHECK20-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK20-NEXT:    [[ADD7:%.*]] = add i64 [[TMP15]], 1
// CHECK20-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
// CHECK20:       omp.inner.for.end:
// CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20:       omp.loop.exit:
// CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT:    [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK20-NEXT:    br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK20:       .omp.final.then:
// CHECK20-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK20:       .omp.final.done:
// CHECK20-NEXT:    ret void
//
//
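// Note: host entry for ftemplate<int> (source line 178); `a` and the i16 `aa`
// travel through [[A_CASTED]]/[[AA_CASTED]] and are forked together with the
// [10 x i32] array.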
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK20-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK20-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK20-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK20-NEXT:    ret void
//
//
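// Note: .omp_outlined..6 runs a signed 64-bit static loop (bounds 0..6,
// i = -10 + iv*3, finalized to 11), incrementing a, aa (through the i16 bitcast
// [[CONV]]), and b[2]; this matches a source loop like
// `for (long long i = -10; i < 10; i += 3)`.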
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK20-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK20-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK20-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK20-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20:       cond.true:
// CHECK20-NEXT:    br label [[COND_END:%.*]]
// CHECK20:       cond.false:
// CHECK20-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT:    br label [[COND_END]]
// CHECK20:       cond.end:
// CHECK20-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK20-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK20-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20:       omp.inner.for.cond:
// CHECK20-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK20-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !30
// CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20:       omp.inner.for.body:
// CHECK20-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK20-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK20-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !30
// CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !30
// CHECK20-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK20-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !30
// CHECK20-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !30
// CHECK20-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
// CHECK20-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK20-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK20-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !30
// CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !30
// CHECK20-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK20-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !30
// CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20:       omp.body.continue:
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20:       omp.inner.for.inc:
// CHECK20-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK20-NEXT:    [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK20-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !30
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
// CHECK20:       omp.inner.for.end:
// CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20:       omp.loop.exit:
// CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK20-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK20:       .omp.final.then:
// CHECK20-NEXT:    store i64 11, i64* [[I]], align 8
// CHECK20-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK20:       .omp.final.done:
// CHECK20-NEXT:    ret void
//
//
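// Note: the assertions below switch to the CHECK21 prefix, i.e. a 64-bit target
// (pointer allocas are align 8). The first kernel (foo, source line 96) forks the
// trivial outlined body with no captures.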
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK21-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK21-NEXT:  entry:
// CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK21-NEXT:    ret void
//
//
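// Note: the outlined body for the l96 region is a six-iteration 32-bit static
// loop (bounds 0..5, i = 3 + iv*5, finalized to 33) with an empty body.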
// CHECK21-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK21-NEXT:  entry:
// CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK21-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK21-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK21-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK21-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK21:       cond.true:
// CHECK21-NEXT:    br label [[COND_END:%.*]]
// CHECK21:       cond.false:
// CHECK21-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    br label [[COND_END]]
// CHECK21:       cond.end:
// CHECK21-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK21-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK21-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK21:       omp.inner.for.cond:
// CHECK21-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK21-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK21-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK21-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK21:       omp.inner.for.body:
// CHECK21-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK21-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK21-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK21-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK21:       omp.body.continue:
// CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK21:       omp.inner.for.inc:
// CHECK21-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK21-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK21-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK21:       omp.inner.for.end:
// CHECK21-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK21:       omp.loop.exit:
// CHECK21-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK21-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK21-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK21:       .omp.final.then:
// CHECK21-NEXT:    store i32 33, i32* [[I]], align 4
// CHECK21-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK21:       .omp.final.done:
// CHECK21-NEXT:    ret void
//
//
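// Note: the l108 entry repacks three 64-bit captures (aa as i16, lin and a as i32)
// through their *_CASTED slots before forking .omp_outlined..1.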
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK21-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT:  entry:
// CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK21-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK21-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK21-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK21-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK21-NEXT:    store i16 [[TMP0]], i16* [[CONV3]], align 2
// CHECK21-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK21-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
// CHECK21-NEXT:    [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK21-NEXT:    store i32 [[TMP2]], i32* [[CONV4]], align 4
// CHECK21-NEXT:    [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK21-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 4
// CHECK21-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK21-NEXT:    store i32 [[TMP4]], i32* [[CONV5]], align 4
// CHECK21-NEXT:    [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
// CHECK21-NEXT:    ret void
//
//
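// Note: .omp_outlined..1 carries the linear-clause bookkeeping: lin and a are
// snapshotted into [[DOTLINEAR_START]]/[[DOTLINEAR_START3]], the step comes from
// @_Z7get_valv, a barrier precedes the static init, each iteration recomputes the
// privatized lin/a as start + iv*step, and .omp.linear.pu copies them back after
// the last iteration.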
// CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK21-NEXT:  entry:
// CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[LIN4:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[A5:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK21-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK21-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK21-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 4
// CHECK21-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK21-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 4
// CHECK21-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
// CHECK21-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
// CHECK21-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK21-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK21-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK21-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK21-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK21-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK21-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK21:       cond.true:
// CHECK21-NEXT:    br label [[COND_END:%.*]]
// CHECK21:       cond.false:
// CHECK21-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT:    br label [[COND_END]]
// CHECK21:       cond.end:
// CHECK21-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK21-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK21-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK21-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK21:       omp.inner.for.cond:
// CHECK21-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK21-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !17
// CHECK21-NEXT:    [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK21-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK21:       omp.inner.for.body:
// CHECK21-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK21-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK21-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK21-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !17
// CHECK21-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !17
// CHECK21-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
// CHECK21-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK21-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17
// CHECK21-NEXT:    [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK21-NEXT:    [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
// CHECK21-NEXT:    [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
// CHECK21-NEXT:    store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !17
// CHECK21-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !17
// CHECK21-NEXT:    [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
// CHECK21-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK21-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17
// CHECK21-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK21-NEXT:    [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
// CHECK21-NEXT:    [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
// CHECK21-NEXT:    store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !17
// CHECK21-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !17
// CHECK21-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
// CHECK21-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
// CHECK21-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
// CHECK21-NEXT:    store i16 [[CONV16]], i16* [[CONV]], align 2, !llvm.access.group !17
// CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK21:       omp.body.continue:
// CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK21:       omp.inner.for.inc:
// CHECK21-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK21-NEXT:    [[ADD17:%.*]] = add i64 [[TMP17]], 1
// CHECK21-NEXT:    store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
// CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
// CHECK21:       omp.inner.for.end:
// CHECK21-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK21:       omp.loop.exit:
// CHECK21-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK21-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK21-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK21:       .omp.final.then:
// CHECK21-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK21-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK21:       .omp.final.done:
// CHECK21-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK21-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK21:       .omp.linear.pu:
// CHECK21-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4
// CHECK21-NEXT:    store i32 [[TMP22]], i32* [[CONV1]], align 4
// CHECK21-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A5]], align 4
// CHECK21-NEXT:    store i32 [[TMP23]], i32* [[CONV2]], align 4
// CHECK21-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK21:       .omp.linear.pu.done:
// CHECK21-NEXT:    ret void
//
//
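// Note: @_Z7get_valv is the step function used by the linear clause above; the
// stub simply returns 0.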
// CHECK21-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK21-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK21-NEXT:  entry:
// CHECK21-NEXT:    ret i64 0
//
//
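// Note: the l116 entry forwards two captures (a as i32, aa as i16, both carried
// in i64 slots) to .omp_outlined..2.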
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
// CHECK21-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT:  entry:
// CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK21-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK21-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK21-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK21-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK21-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
// CHECK21-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK21-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK21-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK21-NEXT:    ret void
//
//
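// Note: .omp_outlined..2 is a four-iteration 32-bit static loop with an i16
// induction variable (it = trunc(6 + iv*4), finalized to 22); it increments a and aa.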
// CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] {
// CHECK21-NEXT:  entry:
// CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[TMP:%.*]] = alloca i16, align 2
// CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[IT:%.*]] = alloca i16, align 2
// CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK21-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK21-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK21-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK21-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK21:       cond.true:
// CHECK21-NEXT:    br label [[COND_END:%.*]]
// CHECK21:       cond.false:
// CHECK21-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    br label [[COND_END]]
// CHECK21:       cond.end:
// CHECK21-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK21-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK21-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK21:       omp.inner.for.cond:
// CHECK21-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
// CHECK21-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20
// CHECK21-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK21-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK21:       omp.inner.for.body:
// CHECK21-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
// CHECK21-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK21-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK21-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK21-NEXT:    store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !20
// CHECK21-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !20
// CHECK21-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK21-NEXT:    store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group !20
// CHECK21-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !20
// CHECK21-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK21-NEXT:    [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK21-NEXT:    [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK21-NEXT:    store i16 [[CONV7]], i16* [[CONV1]], align 2, !llvm.access.group !20
// CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK21:       omp.body.continue:
// CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK21:       omp.inner.for.inc:
// CHECK21-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
// CHECK21-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK21-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
// CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
// CHECK21:       omp.inner.for.end:
// CHECK21-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK21:       omp.loop.exit:
// CHECK21-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK21-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK21-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK21:       .omp.final.then:
// CHECK21-NEXT:    store i16 22, i16* [[IT]], align 2
// CHECK21-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK21:       .omp.final.done:
// CHECK21-NEXT:    ret void
//
//
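// Note: the l140 entry carries the largest capture set: ten arguments (a, the
// fixed/variable arrays b, bn, c, cn with their VLA extents, the TT struct d, and
// the captured chunk size) are reloaded and forwarded to .omp_outlined..3.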
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
// CHECK21-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT:  entry:
// CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK21-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK21-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK21-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK21-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK21-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK21-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK21-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK21-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK21-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK21-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK21-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK21-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK21-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK21-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK21-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK21-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK21-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK21-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK21-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK21-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK21-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK21-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK21-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK21-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK21-NEXT:    [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK21-NEXT:    store i32 [[TMP8]], i32* [[CONV6]], align 4
// CHECK21-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK21-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 4
// CHECK21-NEXT:    [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK21-NEXT:    store i32 [[TMP10]], i32* [[CONV7]], align 4
// CHECK21-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
// CHECK21-NEXT:    ret void
//
//
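// Note: .omp_outlined..3 uses schedule kind 33 (static, chunked) with the captured
// chunk, hence the extra omp.dispatch.cond/omp.dispatch.inc re-slicing loop around
// the inner loop; the i8 induction variable runs it = 122 - iv (finalized to 96)
// and the body updates b[2], bn[3], c[1][2], cn[1][3], d.x and d.y.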
// CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK21-NEXT:  entry:
// CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK21-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK21-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK21-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK21-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK21-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[TMP:%.*]] = alloca i8, align 1
// CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK21-NEXT:    [[IT:%.*]] = alloca i8, align 1
// CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK21-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK21-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK21-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK21-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK21-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK21-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK21-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK21-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK21-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK21-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK21-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK21-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK21-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK21-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK21-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK21-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK21-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK21-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK21-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 4
// CHECK21-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK21-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK21-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
// CHECK21:       omp.dispatch.cond:
// CHECK21-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK21-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK21:       cond.true:
// CHECK21-NEXT:    br label [[COND_END:%.*]]
// CHECK21:       cond.false:
// CHECK21-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    br label [[COND_END]]
// CHECK21:       cond.end:
// CHECK21-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK21-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK21-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK21-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK21-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK21-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK21:       omp.dispatch.body:
// CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK21:       omp.inner.for.cond:
// CHECK21-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK21-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23
// CHECK21-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK21-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK21:       omp.inner.for.body:
// CHECK21-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK21-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK21-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK21-NEXT:    [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
// CHECK21-NEXT:    store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !23
// CHECK21-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !23
// CHECK21-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK21-NEXT:    store i32 [[ADD]], i32* [[CONV]], align 4, !llvm.access.group !23
// CHECK21-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK21-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !23
// CHECK21-NEXT:    [[CONV9:%.*]] = fpext float [[TMP20]] to double
// CHECK21-NEXT:    [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK21-NEXT:    [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK21-NEXT:    store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !23
// CHECK21-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK21-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !23
// CHECK21-NEXT:    [[CONV13:%.*]] = fpext float [[TMP21]] to double
// CHECK21-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK21-NEXT:    [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK21-NEXT:    store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !23
// CHECK21-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK21-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK21-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !23
// CHECK21-NEXT:    [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK21-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !23
// CHECK21-NEXT:    [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK21-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
// CHECK21-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK21-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !23
// CHECK21-NEXT:    [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK21-NEXT:    store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !23
// CHECK21-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK21-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !23
// CHECK21-NEXT:    [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK21-NEXT:    store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !23
// CHECK21-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK21-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !23
// CHECK21-NEXT:    [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
// CHECK21-NEXT:    [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK21-NEXT:    [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK21-NEXT:    store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !23
// CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK21:       omp.body.continue:
// CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK21:       omp.inner.for.inc:
// CHECK21-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK21-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK21-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
// CHECK21:       omp.inner.for.end:
// CHECK21-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
// CHECK21:       omp.dispatch.inc:
// CHECK21-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK21-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT:    [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK21-NEXT:    store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
// CHECK21-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK21-NEXT:    [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK21-NEXT:    store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
// CHECK21-NEXT:    br label [[OMP_DISPATCH_COND]]
// CHECK21:       omp.dispatch.end:
// CHECK21-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK21-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK21-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK21-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK21:       .omp.final.then:
// CHECK21-NEXT:    store i8 96, i8* [[IT]], align 1
// CHECK21-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK21:       .omp.final.done:
// CHECK21-NEXT:    ret void
//
//
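// Note: the fstatic entry (source line 195) passes a, aa, and the i8 aaa through
// casted i64 slots to .omp_outlined..4.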
// CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK21-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK21-NEXT:  entry:
// CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK21-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK21-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK21-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK21-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK21-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK21-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK21-NEXT:    [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK21-NEXT:    store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK21-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK21-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
// CHECK21-NEXT:    [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK21-NEXT:    store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK21-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK21-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 1
// CHECK21-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK21-NEXT:    store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK21-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK21-NEXT:    ret void
//
//
20884 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..4
20885 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
20886 // CHECK21-NEXT:  entry:
20887 // CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20888 // CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20889 // CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
20890 // CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
20891 // CHECK21-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
20892 // CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
20893 // CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
20894 // CHECK21-NEXT:    [[TMP:%.*]] = alloca i32, align 4
20895 // CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20896 // CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20897 // CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
20898 // CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
20899 // CHECK21-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
20900 // CHECK21-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
20901 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
20902 // CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
20903 // CHECK21-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
20904 // CHECK21-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
20905 // CHECK21-NEXT:    ret void
20906 //
20907 //
20908 // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
20909 // CHECK21-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
20910 // CHECK21-NEXT:  entry:
20911 // CHECK21-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
20912 // CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
20913 // CHECK21-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20914 // CHECK21-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
20915 // CHECK21-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
20916 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
20917 // CHECK21-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
20918 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
20919 // CHECK21-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
20920 // CHECK21-NEXT:    [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
20921 // CHECK21-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
20922 // CHECK21-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
20923 // CHECK21-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
20924 // CHECK21-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20925 // CHECK21-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
20926 // CHECK21-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
20927 // CHECK21-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
20928 // CHECK21-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
20929 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
20930 // CHECK21-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20931 // CHECK21-NEXT:    [[TMP3:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
20932 // CHECK21-NEXT:    [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 8
20933 // CHECK21-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
20934 // CHECK21-NEXT:    [[TMP5:%.*]] = load i32, i32* [[CONV]], align 4
20935 // CHECK21-NEXT:    [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i32*
20936 // CHECK21-NEXT:    store i32 [[TMP5]], i32* [[CONV4]], align 4
20937 // CHECK21-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
20938 // CHECK21-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 1
20939 // CHECK21-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
20940 // CHECK21-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
20941 // CHECK21-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
20942 // CHECK21-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV5]], align 1
20943 // CHECK21-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
20944 // CHECK21-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV3]], align 1
20945 // CHECK21-NEXT:    [[TOBOOL6:%.*]] = trunc i8 [[TMP9]] to i1
20946 // CHECK21-NEXT:    br i1 [[TOBOOL6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
20947 // CHECK21:       omp_if.then:
20948 // CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]])
20949 // CHECK21-NEXT:    br label [[OMP_IF_END:%.*]]
20950 // CHECK21:       omp_if.else:
20951 // CHECK21-NEXT:    call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
20952 // CHECK21-NEXT:    store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
20953 // CHECK21-NEXT:    store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
20954 // CHECK21-NEXT:    call void @.omp_outlined..5(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) #[[ATTR2:[0-9]+]]
20955 // CHECK21-NEXT:    call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
20956 // CHECK21-NEXT:    br label [[OMP_IF_END]]
20957 // CHECK21:       omp_if.end:
20958 // CHECK21-NEXT:    ret void
20959 //
20960 //
20961 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..5
20962 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
20963 // CHECK21-NEXT:  entry:
20964 // CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
20965 // CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
20966 // CHECK21-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
20967 // CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
20968 // CHECK21-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
20969 // CHECK21-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
20970 // CHECK21-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
20971 // CHECK21-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
20972 // CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
20973 // CHECK21-NEXT:    [[TMP:%.*]] = alloca i64, align 8
20974 // CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
20975 // CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
20976 // CHECK21-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
20977 // CHECK21-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
20978 // CHECK21-NEXT:    [[IT:%.*]] = alloca i64, align 8
20979 // CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
20980 // CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
20981 // CHECK21-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
20982 // CHECK21-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
20983 // CHECK21-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
20984 // CHECK21-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
20985 // CHECK21-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
20986 // CHECK21-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
20987 // CHECK21-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
20988 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
20989 // CHECK21-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
20990 // CHECK21-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
20991 // CHECK21-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
20992 // CHECK21-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
20993 // CHECK21-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
20994 // CHECK21-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
20995 // CHECK21-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
20996 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
20997 // CHECK21-NEXT:    [[TMP4:%.*]] = load i8, i8* [[CONV3]], align 1
20998 // CHECK21-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1
20999 // CHECK21-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
21000 // CHECK21:       omp_if.then:
21001 // CHECK21-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21002 // CHECK21-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
21003 // CHECK21-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
21004 // CHECK21-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
21005 // CHECK21-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3
21006 // CHECK21-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21007 // CHECK21:       cond.true:
21008 // CHECK21-NEXT:    br label [[COND_END:%.*]]
21009 // CHECK21:       cond.false:
21010 // CHECK21-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
21011 // CHECK21-NEXT:    br label [[COND_END]]
21012 // CHECK21:       cond.end:
21013 // CHECK21-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
21014 // CHECK21-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
21015 // CHECK21-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
21016 // CHECK21-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8
21017 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21018 // CHECK21:       omp.inner.for.cond:
21019 // CHECK21-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
21020 // CHECK21-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !26
21021 // CHECK21-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]]
21022 // CHECK21-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21023 // CHECK21:       omp.inner.for.body:
21024 // CHECK21-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
21025 // CHECK21-NEXT:    [[MUL:%.*]] = mul i64 [[TMP12]], 400
21026 // CHECK21-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
21027 // CHECK21-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !26
21028 // CHECK21-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !26
21029 // CHECK21-NEXT:    [[CONV5:%.*]] = sitofp i32 [[TMP13]] to double
21030 // CHECK21-NEXT:    [[ADD:%.*]] = fadd double [[CONV5]], 1.500000e+00
21031 // CHECK21-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
21032 // CHECK21-NEXT:    store double [[ADD]], double* [[A]], align 8, !nontemporal !27, !llvm.access.group !26
21033 // CHECK21-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
21034 // CHECK21-NEXT:    [[TMP14:%.*]] = load double, double* [[A6]], align 8, !nontemporal !27, !llvm.access.group !26
21035 // CHECK21-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
21036 // CHECK21-NEXT:    store double [[INC]], double* [[A6]], align 8, !nontemporal !27, !llvm.access.group !26
21037 // CHECK21-NEXT:    [[CONV7:%.*]] = fptosi double [[INC]] to i16
21038 // CHECK21-NEXT:    [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]]
21039 // CHECK21-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP15]]
21040 // CHECK21-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
21041 // CHECK21-NEXT:    store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !26
21042 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21043 // CHECK21:       omp.body.continue:
21044 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21045 // CHECK21:       omp.inner.for.inc:
21046 // CHECK21-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
21047 // CHECK21-NEXT:    [[ADD9:%.*]] = add i64 [[TMP16]], 1
21048 // CHECK21-NEXT:    store i64 [[ADD9]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
21049 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
21050 // CHECK21:       omp.inner.for.end:
21051 // CHECK21-NEXT:    br label [[OMP_IF_END:%.*]]
21052 // CHECK21:       omp_if.else:
21053 // CHECK21-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21054 // CHECK21-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
21055 // CHECK21-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
21056 // CHECK21-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
21057 // CHECK21-NEXT:    [[CMP10:%.*]] = icmp ugt i64 [[TMP19]], 3
21058 // CHECK21-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
21059 // CHECK21:       cond.true11:
21060 // CHECK21-NEXT:    br label [[COND_END13:%.*]]
21061 // CHECK21:       cond.false12:
21062 // CHECK21-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
21063 // CHECK21-NEXT:    br label [[COND_END13]]
21064 // CHECK21:       cond.end13:
21065 // CHECK21-NEXT:    [[COND14:%.*]] = phi i64 [ 3, [[COND_TRUE11]] ], [ [[TMP20]], [[COND_FALSE12]] ]
21066 // CHECK21-NEXT:    store i64 [[COND14]], i64* [[DOTOMP_UB]], align 8
21067 // CHECK21-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
21068 // CHECK21-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8
21069 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND15:%.*]]
21070 // CHECK21:       omp.inner.for.cond15:
21071 // CHECK21-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
21072 // CHECK21-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
21073 // CHECK21-NEXT:    [[CMP16:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
21074 // CHECK21-NEXT:    br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY17:%.*]], label [[OMP_INNER_FOR_END31:%.*]]
21075 // CHECK21:       omp.inner.for.body17:
21076 // CHECK21-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
21077 // CHECK21-NEXT:    [[MUL18:%.*]] = mul i64 [[TMP24]], 400
21078 // CHECK21-NEXT:    [[SUB19:%.*]] = sub i64 2000, [[MUL18]]
21079 // CHECK21-NEXT:    store i64 [[SUB19]], i64* [[IT]], align 8
21080 // CHECK21-NEXT:    [[TMP25:%.*]] = load i32, i32* [[CONV]], align 4
21081 // CHECK21-NEXT:    [[CONV20:%.*]] = sitofp i32 [[TMP25]] to double
21082 // CHECK21-NEXT:    [[ADD21:%.*]] = fadd double [[CONV20]], 1.500000e+00
21083 // CHECK21-NEXT:    [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
21084 // CHECK21-NEXT:    store double [[ADD21]], double* [[A22]], align 8
21085 // CHECK21-NEXT:    [[A23:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
21086 // CHECK21-NEXT:    [[TMP26:%.*]] = load double, double* [[A23]], align 8
21087 // CHECK21-NEXT:    [[INC24:%.*]] = fadd double [[TMP26]], 1.000000e+00
21088 // CHECK21-NEXT:    store double [[INC24]], double* [[A23]], align 8
21089 // CHECK21-NEXT:    [[CONV25:%.*]] = fptosi double [[INC24]] to i16
21090 // CHECK21-NEXT:    [[TMP27:%.*]] = mul nsw i64 1, [[TMP2]]
21091 // CHECK21-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP27]]
21092 // CHECK21-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i64 1
21093 // CHECK21-NEXT:    store i16 [[CONV25]], i16* [[ARRAYIDX27]], align 2
21094 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE28:%.*]]
21095 // CHECK21:       omp.body.continue28:
21096 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC29:%.*]]
21097 // CHECK21:       omp.inner.for.inc29:
21098 // CHECK21-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
21099 // CHECK21-NEXT:    [[ADD30:%.*]] = add i64 [[TMP28]], 1
21100 // CHECK21-NEXT:    store i64 [[ADD30]], i64* [[DOTOMP_IV]], align 8
21101 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND15]], !llvm.loop [[LOOP30:![0-9]+]]
21102 // CHECK21:       omp.inner.for.end31:
21103 // CHECK21-NEXT:    br label [[OMP_IF_END]]
21104 // CHECK21:       omp_if.end:
21105 // CHECK21-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21106 // CHECK21:       omp.loop.exit:
21107 // CHECK21-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21108 // CHECK21-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
21109 // CHECK21-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
21110 // CHECK21-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21111 // CHECK21-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
21112 // CHECK21-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21113 // CHECK21:       .omp.final.then:
21114 // CHECK21-NEXT:    store i64 400, i64* [[IT]], align 8
21115 // CHECK21-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21116 // CHECK21:       .omp.final.done:
21117 // CHECK21-NEXT:    ret void
21118 //
21119 //
21120 // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
21121 // CHECK21-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
21122 // CHECK21-NEXT:  entry:
21123 // CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
21124 // CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
21125 // CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
21126 // CHECK21-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
21127 // CHECK21-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
21128 // CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
21129 // CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
21130 // CHECK21-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
21131 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
21132 // CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
21133 // CHECK21-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
21134 // CHECK21-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
21135 // CHECK21-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
21136 // CHECK21-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
21137 // CHECK21-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
21138 // CHECK21-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
21139 // CHECK21-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
21140 // CHECK21-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
21141 // CHECK21-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
21142 // CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
21143 // CHECK21-NEXT:    ret void
21144 //
21145 //
21146 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..6
21147 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
21148 // CHECK21-NEXT:  entry:
21149 // CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21150 // CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21151 // CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
21152 // CHECK21-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
21153 // CHECK21-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
21154 // CHECK21-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
21155 // CHECK21-NEXT:    [[TMP:%.*]] = alloca i64, align 8
21156 // CHECK21-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
21157 // CHECK21-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
21158 // CHECK21-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
21159 // CHECK21-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21160 // CHECK21-NEXT:    [[I:%.*]] = alloca i64, align 8
21161 // CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21162 // CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21163 // CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
21164 // CHECK21-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
21165 // CHECK21-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
21166 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
21167 // CHECK21-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
21168 // CHECK21-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
21169 // CHECK21-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
21170 // CHECK21-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
21171 // CHECK21-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
21172 // CHECK21-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21173 // CHECK21-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21174 // CHECK21-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
21175 // CHECK21-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
21176 // CHECK21-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
21177 // CHECK21-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
21178 // CHECK21-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21179 // CHECK21:       cond.true:
21180 // CHECK21-NEXT:    br label [[COND_END:%.*]]
21181 // CHECK21:       cond.false:
21182 // CHECK21-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
21183 // CHECK21-NEXT:    br label [[COND_END]]
21184 // CHECK21:       cond.end:
21185 // CHECK21-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
21186 // CHECK21-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
21187 // CHECK21-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
21188 // CHECK21-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
21189 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21190 // CHECK21:       omp.inner.for.cond:
21191 // CHECK21-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
21192 // CHECK21-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !32
21193 // CHECK21-NEXT:    [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
21194 // CHECK21-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21195 // CHECK21:       omp.inner.for.body:
21196 // CHECK21-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
21197 // CHECK21-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
21198 // CHECK21-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
21199 // CHECK21-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !32
21200 // CHECK21-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !32
21201 // CHECK21-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
21202 // CHECK21-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !32
21203 // CHECK21-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !32
21204 // CHECK21-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
21205 // CHECK21-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
21206 // CHECK21-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
21207 // CHECK21-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !32
21208 // CHECK21-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
21209 // CHECK21-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
21210 // CHECK21-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
21211 // CHECK21-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
21212 // CHECK21-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21213 // CHECK21:       omp.body.continue:
21214 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21215 // CHECK21:       omp.inner.for.inc:
21216 // CHECK21-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
21217 // CHECK21-NEXT:    [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
21218 // CHECK21-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
21219 // CHECK21-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
21220 // CHECK21:       omp.inner.for.end:
21221 // CHECK21-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21222 // CHECK21:       omp.loop.exit:
21223 // CHECK21-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
21224 // CHECK21-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21225 // CHECK21-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
21226 // CHECK21-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21227 // CHECK21:       .omp.final.then:
21228 // CHECK21-NEXT:    store i64 11, i64* [[I]], align 8
21229 // CHECK21-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21230 // CHECK21:       .omp.final.done:
21231 // CHECK21-NEXT:    ret void
21232 //
21233 //
21234 // CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
21235 // CHECK22-SAME: () #[[ATTR0:[0-9]+]] {
21236 // CHECK22-NEXT:  entry:
21237 // CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
21238 // CHECK22-NEXT:    ret void
21239 //
21240 //
21241 // CHECK22-LABEL: define {{[^@]+}}@.omp_outlined.
21242 // CHECK22-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
21243 // CHECK22-NEXT:  entry:
21244 // CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21245 // CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21246 // CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21247 // CHECK22-NEXT:    [[TMP:%.*]] = alloca i32, align 4
21248 // CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21249 // CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21250 // CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21251 // CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21252 // CHECK22-NEXT:    [[I:%.*]] = alloca i32, align 4
21253 // CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21254 // CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21255 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21256 // CHECK22-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
21257 // CHECK22-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21258 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21259 // CHECK22-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21260 // CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
21261 // CHECK22-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21262 // CHECK22-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21263 // CHECK22-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
21264 // CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21265 // CHECK22:       cond.true:
21266 // CHECK22-NEXT:    br label [[COND_END:%.*]]
21267 // CHECK22:       cond.false:
21268 // CHECK22-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21269 // CHECK22-NEXT:    br label [[COND_END]]
21270 // CHECK22:       cond.end:
21271 // CHECK22-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
21272 // CHECK22-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21273 // CHECK22-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21274 // CHECK22-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
21275 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21276 // CHECK22:       omp.inner.for.cond:
21277 // CHECK22-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
21278 // CHECK22-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
21279 // CHECK22-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
21280 // CHECK22-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21281 // CHECK22:       omp.inner.for.body:
21282 // CHECK22-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
21283 // CHECK22-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
21284 // CHECK22-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
21285 // CHECK22-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
21286 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21287 // CHECK22:       omp.body.continue:
21288 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21289 // CHECK22:       omp.inner.for.inc:
21290 // CHECK22-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
21291 // CHECK22-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
21292 // CHECK22-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
21293 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
21294 // CHECK22:       omp.inner.for.end:
21295 // CHECK22-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21296 // CHECK22:       omp.loop.exit:
21297 // CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
21298 // CHECK22-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21299 // CHECK22-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
21300 // CHECK22-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21301 // CHECK22:       .omp.final.then:
21302 // CHECK22-NEXT:    store i32 33, i32* [[I]], align 4
21303 // CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21304 // CHECK22:       .omp.final.done:
21305 // CHECK22-NEXT:    ret void
21306 //
21307 //
21308 // CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
21309 // CHECK22-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR0]] {
21310 // CHECK22-NEXT:  entry:
21311 // CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
21312 // CHECK22-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
21313 // CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
21314 // CHECK22-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
21315 // CHECK22-NEXT:    [[LIN_CASTED:%.*]] = alloca i64, align 8
21316 // CHECK22-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
21317 // CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
21318 // CHECK22-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
21319 // CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
21320 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
21321 // CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
21322 // CHECK22-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
21323 // CHECK22-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
21324 // CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
21325 // CHECK22-NEXT:    store i16 [[TMP0]], i16* [[CONV3]], align 2
21326 // CHECK22-NEXT:    [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
21327 // CHECK22-NEXT:    [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4
21328 // CHECK22-NEXT:    [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
21329 // CHECK22-NEXT:    store i32 [[TMP2]], i32* [[CONV4]], align 4
21330 // CHECK22-NEXT:    [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
21331 // CHECK22-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 4
21332 // CHECK22-NEXT:    [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
21333 // CHECK22-NEXT:    store i32 [[TMP4]], i32* [[CONV5]], align 4
21334 // CHECK22-NEXT:    [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
21335 // CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
21336 // CHECK22-NEXT:    ret void
21337 //
21338 //
21339 // CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..1
21340 // CHECK22-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
21341 // CHECK22-NEXT:  entry:
21342 // CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21343 // CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21344 // CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
21345 // CHECK22-NEXT:    [[LIN_ADDR:%.*]] = alloca i64, align 8
21346 // CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
21347 // CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
21348 // CHECK22-NEXT:    [[TMP:%.*]] = alloca i64, align 8
21349 // CHECK22-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
21350 // CHECK22-NEXT:    [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
21351 // CHECK22-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
21352 // CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
21353 // CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
21354 // CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
21355 // CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21356 // CHECK22-NEXT:    [[IT:%.*]] = alloca i64, align 8
21357 // CHECK22-NEXT:    [[LIN4:%.*]] = alloca i32, align 4
21358 // CHECK22-NEXT:    [[A5:%.*]] = alloca i32, align 4
21359 // CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21360 // CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21361 // CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
21362 // CHECK22-NEXT:    store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
21363 // CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
21364 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
21365 // CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
21366 // CHECK22-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
21367 // CHECK22-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 4
21368 // CHECK22-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
21369 // CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 4
21370 // CHECK22-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
21371 // CHECK22-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
21372 // CHECK22-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
21373 // CHECK22-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
21374 // CHECK22-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
21375 // CHECK22-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
21376 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21377 // CHECK22-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21378 // CHECK22-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
21379 // CHECK22-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
21380 // CHECK22-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
21381 // CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
21382 // CHECK22-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
21383 // CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21384 // CHECK22:       cond.true:
21385 // CHECK22-NEXT:    br label [[COND_END:%.*]]
21386 // CHECK22:       cond.false:
21387 // CHECK22-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
21388 // CHECK22-NEXT:    br label [[COND_END]]
21389 // CHECK22:       cond.end:
21390 // CHECK22-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
21391 // CHECK22-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
21392 // CHECK22-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
21393 // CHECK22-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
21394 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21395 // CHECK22:       omp.inner.for.cond:
21396 // CHECK22-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
21397 // CHECK22-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !17
21398 // CHECK22-NEXT:    [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
21399 // CHECK22-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21400 // CHECK22:       omp.inner.for.body:
21401 // CHECK22-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
21402 // CHECK22-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
21403 // CHECK22-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
21404 // CHECK22-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !17
21405 // CHECK22-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !17
21406 // CHECK22-NEXT:    [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
21407 // CHECK22-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
21408 // CHECK22-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17
21409 // CHECK22-NEXT:    [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
21410 // CHECK22-NEXT:    [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
21411 // CHECK22-NEXT:    [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
21412 // CHECK22-NEXT:    store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group !17
21413 // CHECK22-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group !17
21414 // CHECK22-NEXT:    [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
21415 // CHECK22-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
21416 // CHECK22-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !17
21417 // CHECK22-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
21418 // CHECK22-NEXT:    [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
21419 // CHECK22-NEXT:    [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
21420 // CHECK22-NEXT:    store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group !17
21421 // CHECK22-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !17
21422 // CHECK22-NEXT:    [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
21423 // CHECK22-NEXT:    [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
21424 // CHECK22-NEXT:    [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
21425 // CHECK22-NEXT:    store i16 [[CONV16]], i16* [[CONV]], align 2, !llvm.access.group !17
21426 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21427 // CHECK22:       omp.body.continue:
21428 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21429 // CHECK22:       omp.inner.for.inc:
21430 // CHECK22-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
21431 // CHECK22-NEXT:    [[ADD17:%.*]] = add i64 [[TMP17]], 1
21432 // CHECK22-NEXT:    store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !17
21433 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
21434 // CHECK22:       omp.inner.for.end:
21435 // CHECK22-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21436 // CHECK22:       omp.loop.exit:
21437 // CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
21438 // CHECK22-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21439 // CHECK22-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
21440 // CHECK22-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21441 // CHECK22:       .omp.final.then:
21442 // CHECK22-NEXT:    store i64 400, i64* [[IT]], align 8
21443 // CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21444 // CHECK22:       .omp.final.done:
21445 // CHECK22-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21446 // CHECK22-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
21447 // CHECK22-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
21448 // CHECK22:       .omp.linear.pu:
21449 // CHECK22-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4
21450 // CHECK22-NEXT:    store i32 [[TMP22]], i32* [[CONV1]], align 4
21451 // CHECK22-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A5]], align 4
21452 // CHECK22-NEXT:    store i32 [[TMP23]], i32* [[CONV2]], align 4
21453 // CHECK22-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
21454 // CHECK22:       .omp.linear.pu.done:
21455 // CHECK22-NEXT:    ret void
21456 //
21457 //
21458 // CHECK22-LABEL: define {{[^@]+}}@_Z7get_valv
21459 // CHECK22-SAME: () #[[ATTR3:[0-9]+]] {
21460 // CHECK22-NEXT:  entry:
21461 // CHECK22-NEXT:    ret i64 0
21462 //
21463 //
21464 // CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
21465 // CHECK22-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR0]] {
21466 // CHECK22-NEXT:  entry:
21467 // CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
21468 // CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
21469 // CHECK22-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
21470 // CHECK22-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
21471 // CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
21472 // CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
21473 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
21474 // CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
21475 // CHECK22-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
21476 // CHECK22-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
21477 // CHECK22-NEXT:    store i32 [[TMP0]], i32* [[CONV2]], align 4
21478 // CHECK22-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
21479 // CHECK22-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2
21480 // CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
21481 // CHECK22-NEXT:    store i16 [[TMP2]], i16* [[CONV3]], align 2
21482 // CHECK22-NEXT:    [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
21483 // CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
21484 // CHECK22-NEXT:    ret void
21485 //
21486 //
21487 // CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..2
21488 // CHECK22-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] {
21489 // CHECK22-NEXT:  entry:
21490 // CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
21491 // CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
21492 // CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
21493 // CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
21494 // CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
21495 // CHECK22-NEXT:    [[TMP:%.*]] = alloca i16, align 2
21496 // CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
21497 // CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
21498 // CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
21499 // CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
21500 // CHECK22-NEXT:    [[IT:%.*]] = alloca i16, align 2
21501 // CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
21502 // CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
21503 // CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
21504 // CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
21505 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
21506 // CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
21507 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
21508 // CHECK22-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
21509 // CHECK22-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
21510 // CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
21511 // CHECK22-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
21512 // CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
21513 // CHECK22-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
21514 // CHECK22-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21515 // CHECK22-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
21516 // CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
21517 // CHECK22:       cond.true:
21518 // CHECK22-NEXT:    br label [[COND_END:%.*]]
21519 // CHECK22:       cond.false:
21520 // CHECK22-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
21521 // CHECK22-NEXT:    br label [[COND_END]]
21522 // CHECK22:       cond.end:
21523 // CHECK22-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
21524 // CHECK22-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
21525 // CHECK22-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
21526 // CHECK22-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
21527 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
21528 // CHECK22:       omp.inner.for.cond:
21529 // CHECK22-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
21530 // CHECK22-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !20
21531 // CHECK22-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
21532 // CHECK22-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
21533 // CHECK22:       omp.inner.for.body:
21534 // CHECK22-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
21535 // CHECK22-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
21536 // CHECK22-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
21537 // CHECK22-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
21538 // CHECK22-NEXT:    store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group !20
21539 // CHECK22-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !20
21540 // CHECK22-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
21541 // CHECK22-NEXT:    store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group !20
21542 // CHECK22-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !20
21543 // CHECK22-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
21544 // CHECK22-NEXT:    [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
21545 // CHECK22-NEXT:    [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
21546 // CHECK22-NEXT:    store i16 [[CONV7]], i16* [[CONV1]], align 2, !llvm.access.group !20
21547 // CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
21548 // CHECK22:       omp.body.continue:
21549 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
21550 // CHECK22:       omp.inner.for.inc:
21551 // CHECK22-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
21552 // CHECK22-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
21553 // CHECK22-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
21554 // CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
21555 // CHECK22:       omp.inner.for.end:
21556 // CHECK22-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
21557 // CHECK22:       omp.loop.exit:
21558 // CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
21559 // CHECK22-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
21560 // CHECK22-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
21561 // CHECK22-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
21562 // CHECK22:       .omp.final.then:
21563 // CHECK22-NEXT:    store i16 22, i16* [[IT]], align 2
21564 // CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
21565 // CHECK22:       .omp.final.done:
21566 // CHECK22-NEXT:    ret void
21567 //
21568 //
21569 // CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
21570 // CHECK22-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
21571 // CHECK22-NEXT:  entry:
21572 // CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
21573 // CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
21574 // CHECK22-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
21575 // CHECK22-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
21576 // CHECK22-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
21577 // CHECK22-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
21578 // CHECK22-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
21579 // CHECK22-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
21580 // CHECK22-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
21581 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
21582 // CHECK22-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
21583 // CHECK22-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
21584 // CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
21585 // CHECK22-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
21586 // CHECK22-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
21587 // CHECK22-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
21588 // CHECK22-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
21589 // CHECK22-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
21590 // CHECK22-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
21591 // CHECK22-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
21592 // CHECK22-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
21593 // CHECK22-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
21594 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
21595 // CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
21596 // CHECK22-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
21597 // CHECK22-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
21598 // CHECK22-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
21599 // CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
21600 // CHECK22-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
21601 // CHECK22-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
21602 // CHECK22-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
21603 // CHECK22-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
21604 // CHECK22-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4
21605 // CHECK22-NEXT:    [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
21606 // CHECK22-NEXT:    store i32 [[TMP8]], i32* [[CONV6]], align 4
21607 // CHECK22-NEXT:    [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
21608 // CHECK22-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 4
21609 // CHECK22-NEXT:    [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
21610 // CHECK22-NEXT:    store i32 [[TMP10]], i32* [[CONV7]], align 4
21611 // CHECK22-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
21612 // CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
21613 // CHECK22-NEXT:    ret void
21614 //
21615 //
// CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK22-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK22-NEXT:  entry:
// CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK22-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK22-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK22-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK22-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK22-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[TMP:%.*]] = alloca i8, align 1
// CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[IT:%.*]] = alloca i8, align 1
// CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK22-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK22-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK22-NEXT:    store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK22-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK22-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK22-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK22-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK22-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK22-NEXT:    [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK22-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK22-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK22-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK22-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT:    [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 4
// CHECK22-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK22-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK22-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
// CHECK22:       omp.dispatch.cond:
// CHECK22-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK22:       cond.true:
// CHECK22-NEXT:    br label [[COND_END:%.*]]
// CHECK22:       cond.false:
// CHECK22-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT:    br label [[COND_END]]
// CHECK22:       cond.end:
// CHECK22-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK22-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK22-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK22-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK22-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK22-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK22:       omp.dispatch.body:
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK22:       omp.inner.for.cond:
// CHECK22-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK22-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK22:       omp.inner.for.body:
// CHECK22-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK22-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK22-NEXT:    [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
// CHECK22-NEXT:    store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group !23
// CHECK22-NEXT:    [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK22-NEXT:    store i32 [[ADD]], i32* [[CONV]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK22-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[CONV9:%.*]] = fpext float [[TMP20]] to double
// CHECK22-NEXT:    [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK22-NEXT:    [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK22-NEXT:    store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK22-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[CONV13:%.*]] = fpext float [[TMP21]] to double
// CHECK22-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK22-NEXT:    [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK22-NEXT:    store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK22-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK22-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK22-NEXT:    store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK22-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
// CHECK22-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK22-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK22-NEXT:    store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK22-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK22-NEXT:    store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK22-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
// CHECK22-NEXT:    [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK22-NEXT:    [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK22-NEXT:    store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group !23
// CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK22:       omp.body.continue:
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK22:       omp.inner.for.inc:
// CHECK22-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK22-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
// CHECK22:       omp.inner.for.end:
// CHECK22-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
// CHECK22:       omp.dispatch.inc:
// CHECK22-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK22-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK22-NEXT:    [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK22-NEXT:    store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
// CHECK22-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK22-NEXT:    [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK22-NEXT:    store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
// CHECK22-NEXT:    br label [[OMP_DISPATCH_COND]]
// CHECK22:       omp.dispatch.end:
// CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK22-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK22-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK22:       .omp.final.then:
// CHECK22-NEXT:    store i8 96, i8* [[IT]], align 1
// CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK22:       .omp.final.done:
// CHECK22-NEXT:    ret void
//
//
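// Host stub for the fstatic target region: each captured scalar (i32, i16,
// i8) is widened through a dedicated i64 "casted" slot before being handed
// to __kmpc_fork_call, matching the 64-bit argument convention used above.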
// CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK22-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK22-NEXT:  entry:
// CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK22-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK22-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK22-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK22-NEXT:    store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK22-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK22-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
// CHECK22-NEXT:    [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK22-NEXT:    store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK22-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 1
// CHECK22-NEXT:    [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK22-NEXT:    store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK22-NEXT:    [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK22-NEXT:    ret void
//
//
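// Note that the outlined function below only materializes the pointer casts
// for its captured arguments and then returns: no loop code is emitted here,
// presumably because the loop's trip count folds to zero at compile time.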
// CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK22-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK22-NEXT:  entry:
// CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK22-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK22-NEXT:    [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    ret void
//
//
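// Target stub for S1::r1: the if-clause condition travels as an i8 (trunc
// to i1 and zext back), and codegen emits both arms: a real fork via
// __kmpc_fork_call when the condition holds, and a serialized parallel
// region (__kmpc_serialized_parallel plus a direct call) when it does not.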
// CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
// CHECK22-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK22-NEXT:  entry:
// CHECK22-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK22-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK22-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK22-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK22-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK22-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK22-NEXT:    [[TMP3:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK22-NEXT:    [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK22-NEXT:    [[TMP5:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK22-NEXT:    [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK22-NEXT:    store i32 [[TMP5]], i32* [[CONV4]], align 4
// CHECK22-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK22-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 1
// CHECK22-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK22-NEXT:    [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK22-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK22-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV5]], align 1
// CHECK22-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK22-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV3]], align 1
// CHECK22-NEXT:    [[TOBOOL6:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK22-NEXT:    br i1 [[TOBOOL6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK22:       omp_if.then:
// CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]])
// CHECK22-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK22:       omp_if.else:
// CHECK22-NEXT:    call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK22-NEXT:    store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK22-NEXT:    store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK22-NEXT:    call void @.omp_outlined..5(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) #[[ATTR2:[0-9]+]]
// CHECK22-NEXT:    call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK22-NEXT:    br label [[OMP_IF_END]]
// CHECK22:       omp_if.end:
// CHECK22-NEXT:    ret void
//
//
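// The outlined body below is cloned under the if-clause: the omp_if.then
// arm carries !llvm.access.group and !nontemporal metadata for the simd
// loop, while the omp_if.else arm repeats the same statements without that
// metadata. Both arms use the unchunked static schedule (type 34), and the
// final loop-variable store of 400 corresponds to 2000 - 4 * 400.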
// CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK22-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK22-NEXT:  entry:
// CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK22-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK22-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK22-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK22-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK22-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK22-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK22-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK22-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK22-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT:    [[TMP4:%.*]] = load i8, i8* [[CONV3]], align 1
// CHECK22-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1
// CHECK22-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK22:       omp_if.then:
// CHECK22-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK22-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK22-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3
// CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK22:       cond.true:
// CHECK22-NEXT:    br label [[COND_END:%.*]]
// CHECK22:       cond.false:
// CHECK22-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    br label [[COND_END]]
// CHECK22:       cond.end:
// CHECK22-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK22-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK22-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK22:       omp.inner.for.cond:
// CHECK22-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
// CHECK22-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !26
// CHECK22-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]]
// CHECK22-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK22:       omp.inner.for.body:
// CHECK22-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
// CHECK22-NEXT:    [[MUL:%.*]] = mul i64 [[TMP12]], 400
// CHECK22-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK22-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !26
// CHECK22-NEXT:    [[TMP13:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !26
// CHECK22-NEXT:    [[CONV5:%.*]] = sitofp i32 [[TMP13]] to double
// CHECK22-NEXT:    [[ADD:%.*]] = fadd double [[CONV5]], 1.500000e+00
// CHECK22-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK22-NEXT:    store double [[ADD]], double* [[A]], align 8, !nontemporal !27, !llvm.access.group !26
// CHECK22-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK22-NEXT:    [[TMP14:%.*]] = load double, double* [[A6]], align 8, !nontemporal !27, !llvm.access.group !26
// CHECK22-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
// CHECK22-NEXT:    store double [[INC]], double* [[A6]], align 8, !nontemporal !27, !llvm.access.group !26
// CHECK22-NEXT:    [[CONV7:%.*]] = fptosi double [[INC]] to i16
// CHECK22-NEXT:    [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK22-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP15]]
// CHECK22-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK22-NEXT:    store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group !26
// CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK22:       omp.body.continue:
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK22:       omp.inner.for.inc:
// CHECK22-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
// CHECK22-NEXT:    [[ADD9:%.*]] = add i64 [[TMP16]], 1
// CHECK22-NEXT:    store i64 [[ADD9]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !26
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
// CHECK22:       omp.inner.for.end:
// CHECK22-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK22:       omp_if.else:
// CHECK22-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK22-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK22-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    [[CMP10:%.*]] = icmp ugt i64 [[TMP19]], 3
// CHECK22-NEXT:    br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
// CHECK22:       cond.true11:
// CHECK22-NEXT:    br label [[COND_END13:%.*]]
// CHECK22:       cond.false12:
// CHECK22-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    br label [[COND_END13]]
// CHECK22:       cond.end13:
// CHECK22-NEXT:    [[COND14:%.*]] = phi i64 [ 3, [[COND_TRUE11]] ], [ [[TMP20]], [[COND_FALSE12]] ]
// CHECK22-NEXT:    store i64 [[COND14]], i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK22-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND15:%.*]]
// CHECK22:       omp.inner.for.cond15:
// CHECK22-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    [[CMP16:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
// CHECK22-NEXT:    br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY17:%.*]], label [[OMP_INNER_FOR_END31:%.*]]
// CHECK22:       omp.inner.for.body17:
// CHECK22-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT:    [[MUL18:%.*]] = mul i64 [[TMP24]], 400
// CHECK22-NEXT:    [[SUB19:%.*]] = sub i64 2000, [[MUL18]]
// CHECK22-NEXT:    store i64 [[SUB19]], i64* [[IT]], align 8
// CHECK22-NEXT:    [[TMP25:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK22-NEXT:    [[CONV20:%.*]] = sitofp i32 [[TMP25]] to double
// CHECK22-NEXT:    [[ADD21:%.*]] = fadd double [[CONV20]], 1.500000e+00
// CHECK22-NEXT:    [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK22-NEXT:    store double [[ADD21]], double* [[A22]], align 8
// CHECK22-NEXT:    [[A23:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK22-NEXT:    [[TMP26:%.*]] = load double, double* [[A23]], align 8
// CHECK22-NEXT:    [[INC24:%.*]] = fadd double [[TMP26]], 1.000000e+00
// CHECK22-NEXT:    store double [[INC24]], double* [[A23]], align 8
// CHECK22-NEXT:    [[CONV25:%.*]] = fptosi double [[INC24]] to i16
// CHECK22-NEXT:    [[TMP27:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK22-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP27]]
// CHECK22-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i64 1
// CHECK22-NEXT:    store i16 [[CONV25]], i16* [[ARRAYIDX27]], align 2
// CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE28:%.*]]
// CHECK22:       omp.body.continue28:
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC29:%.*]]
// CHECK22:       omp.inner.for.inc29:
// CHECK22-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT:    [[ADD30:%.*]] = add i64 [[TMP28]], 1
// CHECK22-NEXT:    store i64 [[ADD30]], i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND15]], !llvm.loop [[LOOP30:![0-9]+]]
// CHECK22:       omp.inner.for.end31:
// CHECK22-NEXT:    br label [[OMP_IF_END]]
// CHECK22:       omp_if.end:
// CHECK22-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK22:       omp.loop.exit:
// CHECK22-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
// CHECK22-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK22-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK22:       .omp.final.then:
// CHECK22-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK22:       .omp.final.done:
// CHECK22-NEXT:    ret void
//
//
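// Host stub for the ftemplate<int> target region; as with the other stubs,
// the i32 and i16 captures pass through i64 casted slots.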
// CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK22-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK22-NEXT:  entry:
// CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK22-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK22-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK22-NEXT:    [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK22-NEXT:    store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK22-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK22-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
// CHECK22-NEXT:    [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK22-NEXT:    store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK22-NEXT:    ret void
//
//
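// Statically scheduled loop over a 7-iteration space (UB 6); the i64 loop
// variable is computed as -10 + 3 * iv, so the value stored in the
// .omp.final epilogue is 11.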
// CHECK22-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK22-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK22-NEXT:  entry:
// CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK22-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK22-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT:    store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK22-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT:    [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK22-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK22-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK22-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK22-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK22-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK22-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK22-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK22:       cond.true:
// CHECK22-NEXT:    br label [[COND_END:%.*]]
// CHECK22:       cond.false:
// CHECK22-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    br label [[COND_END]]
// CHECK22:       cond.end:
// CHECK22-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK22-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK22-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK22-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK22:       omp.inner.for.cond:
// CHECK22-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK22-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK22:       omp.inner.for.body:
// CHECK22-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK22-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK22-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group !32
// CHECK22-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK22-NEXT:    store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group !32
// CHECK22-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group !32
// CHECK22-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
// CHECK22-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK22-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK22-NEXT:    store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group !32
// CHECK22-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK22-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
// CHECK22-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK22-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !32
// CHECK22-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK22:       omp.body.continue:
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK22:       omp.inner.for.inc:
// CHECK22-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK22-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !32
// CHECK22-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
// CHECK22:       omp.inner.for.end:
// CHECK22-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK22:       omp.loop.exit:
// CHECK22-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK22-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK22-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK22-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK22:       .omp.final.then:
// CHECK22-NEXT:    store i64 11, i64* [[I]], align 8
// CHECK22-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK22:       .omp.final.done:
// CHECK22-NEXT:    ret void
//
//
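// The CHECK23 prefix covers the corresponding i386 (32-bit) run: pointer
// allocas are 4-byte aligned and scalar captures are forwarded as i32
// values instead of being widened to i64.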
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK23-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK23-NEXT:  entry:
// CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK23-NEXT:    ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK23-NEXT:  entry:
// CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK23-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK23-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK23-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK23:       cond.true:
// CHECK23-NEXT:    br label [[COND_END:%.*]]
// CHECK23:       cond.false:
// CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT:    br label [[COND_END]]
// CHECK23:       cond.end:
// CHECK23-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK23-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK23-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK23:       omp.inner.for.cond:
// CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
// CHECK23-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK23-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK23:       omp.inner.for.body:
// CHECK23-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK23-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK23-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK23-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
// CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK23:       omp.body.continue:
// CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK23:       omp.inner.for.inc:
// CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK23-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK23-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK23:       omp.inner.for.end:
// CHECK23-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK23:       omp.loop.exit:
// CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK23-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK23-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK23:       .omp.final.then:
// CHECK23-NEXT:    store i32 33, i32* [[I]], align 4
// CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK23:       .omp.final.done:
// CHECK23-NEXT:    ret void
//
//
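// 32-bit stub for the l108 region: the i16 capture travels in the low half
// of an i32 casted slot, while lin and a are forwarded as plain i32 values.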
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK23-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT:  entry:
// CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK23-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK23-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK23-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
// CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[TMP4]], i32* [[A_CASTED]], align 4
// CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
// CHECK23-NEXT:    ret void
//
//
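// Linear-clause codegen: the starting values of lin and a are snapshotted
// into .linear.start allocas and the step comes from get_val(); a barrier
// precedes the static init, each iteration rematerializes start + iv * step,
// and the .omp.linear.pu block copies the final values back to the
// arguments only when this thread executed the last iteration.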
// CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK23-NEXT:  entry:
// CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK23-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK23-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK23-NEXT:    [[LIN2:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[A3:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK23-NEXT:    [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
// CHECK23-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
// CHECK23-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK23-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK23-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK23-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK23-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK23-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK23-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK23:       cond.true:
// CHECK23-NEXT:    br label [[COND_END:%.*]]
// CHECK23:       cond.false:
// CHECK23-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT:    br label [[COND_END]]
// CHECK23:       cond.end:
// CHECK23-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK23-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK23-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK23-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK23:       omp.inner.for.cond:
// CHECK23-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK23-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK23:       omp.inner.for.body:
// CHECK23-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK23-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK23-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !18
// CHECK23-NEXT:    [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
// CHECK23-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK23-NEXT:    [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
// CHECK23-NEXT:    [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
// CHECK23-NEXT:    store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !18
// CHECK23-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !18
// CHECK23-NEXT:    [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
// CHECK23-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK23-NEXT:    [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
// CHECK23-NEXT:    [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
// CHECK23-NEXT:    store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !18
// CHECK23-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !18
// CHECK23-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
// CHECK23-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
// CHECK23-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
// CHECK23-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !18
// CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK23:       omp.body.continue:
// CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK23:       omp.inner.for.inc:
// CHECK23-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    [[ADD15:%.*]] = add i64 [[TMP17]], 1
// CHECK23-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK23:       omp.inner.for.end:
// CHECK23-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK23:       omp.loop.exit:
// CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK23-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK23-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK23:       .omp.final.then:
// CHECK23-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK23:       .omp.final.done:
// CHECK23-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK23-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK23-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK23:       .omp.linear.pu:
// CHECK23-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4
// CHECK23-NEXT:    store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4
// CHECK23-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A3]], align 4
// CHECK23-NEXT:    store i32 [[TMP23]], i32* [[A_ADDR]], align 4
// CHECK23-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK23:       .omp.linear.pu.done:
// CHECK23-NEXT:    ret void
//
//
// CHECK23-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK23-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK23-NEXT:  entry:
// CHECK23-NEXT:    ret i64 0
//
//
// CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
// CHECK23-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR0]] {
// CHECK23-NEXT:  entry:
// CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK23-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK23-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK23-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK23-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK23-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK23-NEXT:    ret void
//
//
22397 // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..2
22398 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] {
22399 // CHECK23-NEXT:  entry:
22400 // CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22401 // CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22402 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
22403 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
22404 // CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22405 // CHECK23-NEXT:    [[TMP:%.*]] = alloca i16, align 2
22406 // CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22407 // CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22408 // CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22409 // CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22410 // CHECK23-NEXT:    [[IT:%.*]] = alloca i16, align 2
22411 // CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22412 // CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22413 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
22414 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
22415 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
22416 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22417 // CHECK23-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
22418 // CHECK23-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22419 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22420 // CHECK23-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22421 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
22422 // CHECK23-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
22423 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22424 // CHECK23-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
22425 // CHECK23-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22426 // CHECK23:       cond.true:
22427 // CHECK23-NEXT:    br label [[COND_END:%.*]]
22428 // CHECK23:       cond.false:
22429 // CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22430 // CHECK23-NEXT:    br label [[COND_END]]
22431 // CHECK23:       cond.end:
22432 // CHECK23-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
22433 // CHECK23-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
22434 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22435 // CHECK23-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
22436 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22437 // CHECK23:       omp.inner.for.cond:
22438 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
22439 // CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
22440 // CHECK23-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
22441 // CHECK23-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22442 // CHECK23:       omp.inner.for.body:
22443 // CHECK23-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
22444 // CHECK23-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
22445 // CHECK23-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
22446 // CHECK23-NEXT:    [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
22447 // CHECK23-NEXT:    store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !21
22448 // CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !21
22449 // CHECK23-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
22450 // CHECK23-NEXT:    store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !21
22451 // CHECK23-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !21
22452 // CHECK23-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
22453 // CHECK23-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
22454 // CHECK23-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
22455 // CHECK23-NEXT:    store i16 [[CONV6]], i16* [[CONV]], align 2, !llvm.access.group !21
22456 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22457 // CHECK23:       omp.body.continue:
22458 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22459 // CHECK23:       omp.inner.for.inc:
22460 // CHECK23-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
22461 // CHECK23-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
22462 // CHECK23-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
22463 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
22464 // CHECK23:       omp.inner.for.end:
22465 // CHECK23-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22466 // CHECK23:       omp.loop.exit:
22467 // CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
22468 // CHECK23-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22469 // CHECK23-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
22470 // CHECK23-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22471 // CHECK23:       .omp.final.then:
22472 // CHECK23-NEXT:    store i16 22, i16* [[IT]], align 2
22473 // CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22474 // CHECK23:       .omp.final.done:
22475 // CHECK23-NEXT:    ret void
22476 //
22477 //
22478 // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
22479 // CHECK23-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
22480 // CHECK23-NEXT:  entry:
22481 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
22482 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
22483 // CHECK23-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
22484 // CHECK23-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
22485 // CHECK23-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
22486 // CHECK23-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
22487 // CHECK23-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
22488 // CHECK23-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
22489 // CHECK23-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
22490 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
22491 // CHECK23-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
22492 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
22493 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
22494 // CHECK23-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
22495 // CHECK23-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
22496 // CHECK23-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
22497 // CHECK23-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
22498 // CHECK23-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
22499 // CHECK23-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
22500 // CHECK23-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
22501 // CHECK23-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
22502 // CHECK23-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
22503 // CHECK23-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
22504 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
22505 // CHECK23-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
22506 // CHECK23-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
22507 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
22508 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
22509 // CHECK23-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
22510 // CHECK23-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
22511 // CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
22512 // CHECK23-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
22513 // CHECK23-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
22514 // CHECK23-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
22515 // CHECK23-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
22516 // CHECK23-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
22517 // CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
22518 // CHECK23-NEXT:    ret void
22519 //
22520 //
22521 // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..3
22522 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
22523 // CHECK23-NEXT:  entry:
22524 // CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22525 // CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22526 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
22527 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
22528 // CHECK23-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
22529 // CHECK23-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
22530 // CHECK23-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
22531 // CHECK23-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
22532 // CHECK23-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
22533 // CHECK23-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
22534 // CHECK23-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
22535 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
22536 // CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22537 // CHECK23-NEXT:    [[TMP:%.*]] = alloca i8, align 1
22538 // CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
22539 // CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
22540 // CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
22541 // CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22542 // CHECK23-NEXT:    [[IT:%.*]] = alloca i8, align 1
22543 // CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22544 // CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22545 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
22546 // CHECK23-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
22547 // CHECK23-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
22548 // CHECK23-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
22549 // CHECK23-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
22550 // CHECK23-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
22551 // CHECK23-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
22552 // CHECK23-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
22553 // CHECK23-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
22554 // CHECK23-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
22555 // CHECK23-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
22556 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
22557 // CHECK23-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
22558 // CHECK23-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
22559 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
22560 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
22561 // CHECK23-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
22562 // CHECK23-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
22563 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
22564 // CHECK23-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
22565 // CHECK23-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
22566 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22567 // CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
22568 // CHECK23-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22569 // CHECK23-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
22570 // CHECK23-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
22571 // CHECK23-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
22572 // CHECK23:       omp.dispatch.cond:
22573 // CHECK23-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22574 // CHECK23-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
22575 // CHECK23-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22576 // CHECK23:       cond.true:
22577 // CHECK23-NEXT:    br label [[COND_END:%.*]]
22578 // CHECK23:       cond.false:
22579 // CHECK23-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22580 // CHECK23-NEXT:    br label [[COND_END]]
22581 // CHECK23:       cond.end:
22582 // CHECK23-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
22583 // CHECK23-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
22584 // CHECK23-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22585 // CHECK23-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
22586 // CHECK23-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
22587 // CHECK23-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22588 // CHECK23-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
22589 // CHECK23-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
22590 // CHECK23:       omp.dispatch.body:
22591 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22592 // CHECK23:       omp.inner.for.cond:
22593 // CHECK23-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
22594 // CHECK23-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
22595 // CHECK23-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
22596 // CHECK23-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22597 // CHECK23:       omp.inner.for.body:
22598 // CHECK23-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
22599 // CHECK23-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
22600 // CHECK23-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
22601 // CHECK23-NEXT:    [[CONV:%.*]] = trunc i32 [[SUB]] to i8
22602 // CHECK23-NEXT:    store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !24
22603 // CHECK23-NEXT:    [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !24
22604 // CHECK23-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
22605 // CHECK23-NEXT:    store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group !24
22606 // CHECK23-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
22607 // CHECK23-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !24
22608 // CHECK23-NEXT:    [[CONV7:%.*]] = fpext float [[TMP20]] to double
22609 // CHECK23-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
22610 // CHECK23-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
22611 // CHECK23-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !24
22612 // CHECK23-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
22613 // CHECK23-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
22614 // CHECK23-NEXT:    [[CONV11:%.*]] = fpext float [[TMP21]] to double
22615 // CHECK23-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
22616 // CHECK23-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
22617 // CHECK23-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
22618 // CHECK23-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
22619 // CHECK23-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
22620 // CHECK23-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
22621 // CHECK23-NEXT:    [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
22622 // CHECK23-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
22623 // CHECK23-NEXT:    [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
22624 // CHECK23-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
22625 // CHECK23-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
22626 // CHECK23-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
22627 // CHECK23-NEXT:    [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
22628 // CHECK23-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
22629 // CHECK23-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
22630 // CHECK23-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !24
22631 // CHECK23-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
22632 // CHECK23-NEXT:    store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !24
22633 // CHECK23-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
22634 // CHECK23-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !24
22635 // CHECK23-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
22636 // CHECK23-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
22637 // CHECK23-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
22638 // CHECK23-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !24
22639 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22640 // CHECK23:       omp.body.continue:
22641 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22642 // CHECK23:       omp.inner.for.inc:
22643 // CHECK23-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
22644 // CHECK23-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
22645 // CHECK23-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
22646 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
22647 // CHECK23:       omp.inner.for.end:
22648 // CHECK23-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
22649 // CHECK23:       omp.dispatch.inc:
22650 // CHECK23-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
22651 // CHECK23-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22652 // CHECK23-NEXT:    [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
22653 // CHECK23-NEXT:    store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
22654 // CHECK23-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
22655 // CHECK23-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
22656 // CHECK23-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
22657 // CHECK23-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
22658 // CHECK23-NEXT:    br label [[OMP_DISPATCH_COND]]
22659 // CHECK23:       omp.dispatch.end:
22660 // CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
22661 // CHECK23-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22662 // CHECK23-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
22663 // CHECK23-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22664 // CHECK23:       .omp.final.then:
22665 // CHECK23-NEXT:    store i8 96, i8* [[IT]], align 1
22666 // CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22667 // CHECK23:       .omp.final.done:
22668 // CHECK23-NEXT:    ret void
22669 //
22670 //
22671 // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
22672 // CHECK23-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
22673 // CHECK23-NEXT:  entry:
22674 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
22675 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
22676 // CHECK23-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
22677 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
22678 // CHECK23-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
22679 // CHECK23-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
22680 // CHECK23-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
22681 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
22682 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
22683 // CHECK23-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
22684 // CHECK23-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
22685 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
22686 // CHECK23-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
22687 // CHECK23-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
22688 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
22689 // CHECK23-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
22690 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
22691 // CHECK23-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
22692 // CHECK23-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
22693 // CHECK23-NEXT:    store i16 [[TMP3]], i16* [[CONV2]], align 2
22694 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
22695 // CHECK23-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 1
22696 // CHECK23-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
22697 // CHECK23-NEXT:    store i8 [[TMP5]], i8* [[CONV3]], align 1
22698 // CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
22699 // CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
22700 // CHECK23-NEXT:    ret void
22701 //
22702 //
22703 // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..4
22704 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
22705 // CHECK23-NEXT:  entry:
22706 // CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22707 // CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22708 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
22709 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
22710 // CHECK23-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
22711 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
22712 // CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
22713 // CHECK23-NEXT:    [[TMP:%.*]] = alloca i32, align 4
22714 // CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22715 // CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22716 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
22717 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
22718 // CHECK23-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
22719 // CHECK23-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
22720 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
22721 // CHECK23-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
22722 // CHECK23-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
22723 // CHECK23-NEXT:    ret void
22724 //
22725 //
22726 // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
22727 // CHECK23-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
22728 // CHECK23-NEXT:  entry:
22729 // CHECK23-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
22730 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
22731 // CHECK23-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
22732 // CHECK23-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
22733 // CHECK23-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
22734 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
22735 // CHECK23-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
22736 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
22737 // CHECK23-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
22738 // CHECK23-NEXT:    [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
22739 // CHECK23-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
22740 // CHECK23-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
22741 // CHECK23-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
22742 // CHECK23-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
22743 // CHECK23-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
22744 // CHECK23-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
22745 // CHECK23-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
22746 // CHECK23-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
22747 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
22748 // CHECK23-NEXT:    [[TMP3:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
22749 // CHECK23-NEXT:    [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 4
22750 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
22751 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_ADDR]], align 4
22752 // CHECK23-NEXT:    store i32 [[TMP5]], i32* [[B_CASTED]], align 4
22753 // CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4
22754 // CHECK23-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV]], align 1
22755 // CHECK23-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
22756 // CHECK23-NEXT:    [[CONV3:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8*
22757 // CHECK23-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
22758 // CHECK23-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV3]], align 1
22759 // CHECK23-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
22760 // CHECK23-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV]], align 1
22761 // CHECK23-NEXT:    [[TOBOOL4:%.*]] = trunc i8 [[TMP9]] to i1
22762 // CHECK23-NEXT:    br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
22763 // CHECK23:       omp_if.then:
22764 // CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]])
22765 // CHECK23-NEXT:    br label [[OMP_IF_END:%.*]]
22766 // CHECK23:       omp_if.else:
22767 // CHECK23-NEXT:    call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
22768 // CHECK23-NEXT:    store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
22769 // CHECK23-NEXT:    store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
22770 // CHECK23-NEXT:    call void @.omp_outlined..5(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) #[[ATTR2:[0-9]+]]
22771 // CHECK23-NEXT:    call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
22772 // CHECK23-NEXT:    br label [[OMP_IF_END]]
22773 // CHECK23:       omp_if.end:
22774 // CHECK23-NEXT:    ret void
22775 //
22776 //
22777 // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..5
22778 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
22779 // CHECK23-NEXT:  entry:
22780 // CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22781 // CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22782 // CHECK23-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
22783 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
22784 // CHECK23-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
22785 // CHECK23-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
22786 // CHECK23-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
22787 // CHECK23-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
22788 // CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
22789 // CHECK23-NEXT:    [[TMP:%.*]] = alloca i64, align 4
22790 // CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
22791 // CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
22792 // CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
22793 // CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22794 // CHECK23-NEXT:    [[IT:%.*]] = alloca i64, align 8
22795 // CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22796 // CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22797 // CHECK23-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
22798 // CHECK23-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
22799 // CHECK23-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
22800 // CHECK23-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
22801 // CHECK23-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
22802 // CHECK23-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
22803 // CHECK23-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
22804 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
22805 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
22806 // CHECK23-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
22807 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
22808 // CHECK23-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
22809 // CHECK23-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
22810 // CHECK23-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
22811 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22812 // CHECK23-NEXT:    [[TMP4:%.*]] = load i8, i8* [[CONV]], align 1
22813 // CHECK23-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1
22814 // CHECK23-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
22815 // CHECK23:       omp_if.then:
22816 // CHECK23-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22817 // CHECK23-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
22818 // CHECK23-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
22819 // CHECK23-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
22820 // CHECK23-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3
22821 // CHECK23-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22822 // CHECK23:       cond.true:
22823 // CHECK23-NEXT:    br label [[COND_END:%.*]]
22824 // CHECK23:       cond.false:
22825 // CHECK23-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
22826 // CHECK23-NEXT:    br label [[COND_END]]
22827 // CHECK23:       cond.end:
22828 // CHECK23-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
22829 // CHECK23-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
22830 // CHECK23-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
22831 // CHECK23-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8
22832 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
22833 // CHECK23:       omp.inner.for.cond:
22834 // CHECK23-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
22835 // CHECK23-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !27
22836 // CHECK23-NEXT:    [[CMP3:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]]
22837 // CHECK23-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
22838 // CHECK23:       omp.inner.for.body:
22839 // CHECK23-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
22840 // CHECK23-NEXT:    [[MUL:%.*]] = mul i64 [[TMP12]], 400
22841 // CHECK23-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
22842 // CHECK23-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !27
22843 // CHECK23-NEXT:    [[TMP13:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !27
22844 // CHECK23-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP13]] to double
22845 // CHECK23-NEXT:    [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
22846 // CHECK23-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
22847 // CHECK23-NEXT:    store double [[ADD]], double* [[A]], align 4, !nontemporal !28, !llvm.access.group !27
22848 // CHECK23-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
22849 // CHECK23-NEXT:    [[TMP14:%.*]] = load double, double* [[A5]], align 4, !nontemporal !28, !llvm.access.group !27
22850 // CHECK23-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
22851 // CHECK23-NEXT:    store double [[INC]], double* [[A5]], align 4, !nontemporal !28, !llvm.access.group !27
22852 // CHECK23-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
22853 // CHECK23-NEXT:    [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]]
22854 // CHECK23-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP15]]
22855 // CHECK23-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
22856 // CHECK23-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !27
22857 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
22858 // CHECK23:       omp.body.continue:
22859 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
22860 // CHECK23:       omp.inner.for.inc:
22861 // CHECK23-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
22862 // CHECK23-NEXT:    [[ADD8:%.*]] = add i64 [[TMP16]], 1
22863 // CHECK23-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
22864 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
22865 // CHECK23:       omp.inner.for.end:
22866 // CHECK23-NEXT:    br label [[OMP_IF_END:%.*]]
22867 // CHECK23:       omp_if.else:
22868 // CHECK23-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22869 // CHECK23-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
22870 // CHECK23-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
22871 // CHECK23-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
22872 // CHECK23-NEXT:    [[CMP9:%.*]] = icmp ugt i64 [[TMP19]], 3
22873 // CHECK23-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
22874 // CHECK23:       cond.true10:
22875 // CHECK23-NEXT:    br label [[COND_END12:%.*]]
22876 // CHECK23:       cond.false11:
22877 // CHECK23-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
22878 // CHECK23-NEXT:    br label [[COND_END12]]
22879 // CHECK23:       cond.end12:
22880 // CHECK23-NEXT:    [[COND13:%.*]] = phi i64 [ 3, [[COND_TRUE10]] ], [ [[TMP20]], [[COND_FALSE11]] ]
22881 // CHECK23-NEXT:    store i64 [[COND13]], i64* [[DOTOMP_UB]], align 8
22882 // CHECK23-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
22883 // CHECK23-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8
22884 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND14:%.*]]
22885 // CHECK23:       omp.inner.for.cond14:
22886 // CHECK23-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
22887 // CHECK23-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
22888 // CHECK23-NEXT:    [[CMP15:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
22889 // CHECK23-NEXT:    br i1 [[CMP15]], label [[OMP_INNER_FOR_BODY16:%.*]], label [[OMP_INNER_FOR_END30:%.*]]
22890 // CHECK23:       omp.inner.for.body16:
22891 // CHECK23-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
22892 // CHECK23-NEXT:    [[MUL17:%.*]] = mul i64 [[TMP24]], 400
22893 // CHECK23-NEXT:    [[SUB18:%.*]] = sub i64 2000, [[MUL17]]
22894 // CHECK23-NEXT:    store i64 [[SUB18]], i64* [[IT]], align 8
22895 // CHECK23-NEXT:    [[TMP25:%.*]] = load i32, i32* [[B_ADDR]], align 4
22896 // CHECK23-NEXT:    [[CONV19:%.*]] = sitofp i32 [[TMP25]] to double
22897 // CHECK23-NEXT:    [[ADD20:%.*]] = fadd double [[CONV19]], 1.500000e+00
22898 // CHECK23-NEXT:    [[A21:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
22899 // CHECK23-NEXT:    store double [[ADD20]], double* [[A21]], align 4
22900 // CHECK23-NEXT:    [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
22901 // CHECK23-NEXT:    [[TMP26:%.*]] = load double, double* [[A22]], align 4
22902 // CHECK23-NEXT:    [[INC23:%.*]] = fadd double [[TMP26]], 1.000000e+00
22903 // CHECK23-NEXT:    store double [[INC23]], double* [[A22]], align 4
22904 // CHECK23-NEXT:    [[CONV24:%.*]] = fptosi double [[INC23]] to i16
22905 // CHECK23-NEXT:    [[TMP27:%.*]] = mul nsw i32 1, [[TMP2]]
22906 // CHECK23-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP27]]
22907 // CHECK23-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1
22908 // CHECK23-NEXT:    store i16 [[CONV24]], i16* [[ARRAYIDX26]], align 2
22909 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE27:%.*]]
22910 // CHECK23:       omp.body.continue27:
22911 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC28:%.*]]
22912 // CHECK23:       omp.inner.for.inc28:
22913 // CHECK23-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
22914 // CHECK23-NEXT:    [[ADD29:%.*]] = add i64 [[TMP28]], 1
22915 // CHECK23-NEXT:    store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8
22916 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND14]], !llvm.loop [[LOOP31:![0-9]+]]
22917 // CHECK23:       omp.inner.for.end30:
22918 // CHECK23-NEXT:    br label [[OMP_IF_END]]
22919 // CHECK23:       omp_if.end:
22920 // CHECK23-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
22921 // CHECK23:       omp.loop.exit:
22922 // CHECK23-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22923 // CHECK23-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
22924 // CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
22925 // CHECK23-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
22926 // CHECK23-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
22927 // CHECK23-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
22928 // CHECK23:       .omp.final.then:
22929 // CHECK23-NEXT:    store i64 400, i64* [[IT]], align 8
22930 // CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
22931 // CHECK23:       .omp.final.done:
22932 // CHECK23-NEXT:    ret void
22933 //
22934 //
22935 // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
22936 // CHECK23-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
22937 // CHECK23-NEXT:  entry:
22938 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
22939 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
22940 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
22941 // CHECK23-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
22942 // CHECK23-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
22943 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
22944 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
22945 // CHECK23-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
22946 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
22947 // CHECK23-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
22948 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
22949 // CHECK23-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
22950 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
22951 // CHECK23-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
22952 // CHECK23-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
22953 // CHECK23-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
22954 // CHECK23-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
22955 // CHECK23-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
22956 // CHECK23-NEXT:    ret void
22957 //
22958 //
22959 // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..6
22960 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
22961 // CHECK23-NEXT:  entry:
22962 // CHECK23-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
22963 // CHECK23-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
22964 // CHECK23-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
22965 // CHECK23-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
22966 // CHECK23-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
22967 // CHECK23-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
22968 // CHECK23-NEXT:    [[TMP:%.*]] = alloca i64, align 4
22969 // CHECK23-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
22970 // CHECK23-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
22971 // CHECK23-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
22972 // CHECK23-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
22973 // CHECK23-NEXT:    [[I:%.*]] = alloca i64, align 8
22974 // CHECK23-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
22975 // CHECK23-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
22976 // CHECK23-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
22977 // CHECK23-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
22978 // CHECK23-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
22979 // CHECK23-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
22980 // CHECK23-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
22981 // CHECK23-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
22982 // CHECK23-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
22983 // CHECK23-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
22984 // CHECK23-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
22985 // CHECK23-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
22986 // CHECK23-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
22987 // CHECK23-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
22988 // CHECK23-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
22989 // CHECK23-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
22990 // CHECK23-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
22991 // CHECK23:       cond.true:
22992 // CHECK23-NEXT:    br label [[COND_END:%.*]]
22993 // CHECK23:       cond.false:
22994 // CHECK23-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
22995 // CHECK23-NEXT:    br label [[COND_END]]
22996 // CHECK23:       cond.end:
22997 // CHECK23-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
22998 // CHECK23-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
22999 // CHECK23-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
23000 // CHECK23-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
23001 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23002 // CHECK23:       omp.inner.for.cond:
23003 // CHECK23-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33
23004 // CHECK23-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !33
23005 // CHECK23-NEXT:    [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
23006 // CHECK23-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23007 // CHECK23:       omp.inner.for.body:
23008 // CHECK23-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33
23009 // CHECK23-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
23010 // CHECK23-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
23011 // CHECK23-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !33
23012 // CHECK23-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !33
23013 // CHECK23-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
23014 // CHECK23-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !33
23015 // CHECK23-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !33
23016 // CHECK23-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
23017 // CHECK23-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
23018 // CHECK23-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
23019 // CHECK23-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !33
23020 // CHECK23-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
23021 // CHECK23-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !33
23022 // CHECK23-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
23023 // CHECK23-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !33
23024 // CHECK23-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23025 // CHECK23:       omp.body.continue:
23026 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23027 // CHECK23:       omp.inner.for.inc:
23028 // CHECK23-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33
23029 // CHECK23-NEXT:    [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
23030 // CHECK23-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33
23031 // CHECK23-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
23032 // CHECK23:       omp.inner.for.end:
23033 // CHECK23-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23034 // CHECK23:       omp.loop.exit:
23035 // CHECK23-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
23036 // CHECK23-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23037 // CHECK23-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
23038 // CHECK23-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23039 // CHECK23:       .omp.final.then:
23040 // CHECK23-NEXT:    store i64 11, i64* [[I]], align 8
23041 // CHECK23-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23042 // CHECK23:       .omp.final.done:
23043 // CHECK23-NEXT:    ret void
23044 //
23045 //
23046 // CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
23047 // CHECK24-SAME: () #[[ATTR0:[0-9]+]] {
23048 // CHECK24-NEXT:  entry:
23049 // CHECK24-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
23050 // CHECK24-NEXT:    ret void
23051 //
23052 //
23053 // CHECK24-LABEL: define {{[^@]+}}@.omp_outlined.
23054 // CHECK24-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
23055 // CHECK24-NEXT:  entry:
23056 // CHECK24-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
23057 // CHECK24-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK24-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK24-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK24-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK24:       cond.true:
// CHECK24-NEXT:    br label [[COND_END:%.*]]
// CHECK24:       cond.false:
// CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    br label [[COND_END]]
// CHECK24:       cond.end:
// CHECK24-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK24-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK24-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK24:       omp.inner.for.cond:
// CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK24-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
// CHECK24-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK24-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK24:       omp.inner.for.body:
// CHECK24-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK24-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK24-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK24-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
// CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK24:       omp.body.continue:
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK24:       omp.inner.for.inc:
// CHECK24-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK24-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK24-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK24:       omp.inner.for.end:
// CHECK24-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK24:       omp.loop.exit:
// CHECK24-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK24-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT:    [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK24-NEXT:    br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK24:       .omp.final.then:
// CHECK24-NEXT:    store i32 33, i32* [[I]], align 4
// CHECK24-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK24:       .omp.final.done:
// CHECK24-NEXT:    ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK24-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK24-NEXT:  entry:
// CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK24-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK24-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK24-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
// CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[TMP4]], i32* [[A_CASTED]], align 4
// CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK24-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
// CHECK24-NEXT:    ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK24-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK24-NEXT:  entry:
// CHECK24-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK24-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    [[LIN2:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[A3:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK24-NEXT:    [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
// CHECK24-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
// CHECK24-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK24-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK24-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK24-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK24-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK24-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK24-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK24:       cond.true:
// CHECK24-NEXT:    br label [[COND_END:%.*]]
// CHECK24:       cond.false:
// CHECK24-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    br label [[COND_END]]
// CHECK24:       cond.end:
// CHECK24-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK24-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK24-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK24:       omp.inner.for.cond:
// CHECK24-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK24-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
// CHECK24-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK24-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK24:       omp.inner.for.body:
// CHECK24-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK24-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK24-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK24-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
// CHECK24-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group !18
// CHECK24-NEXT:    [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
// CHECK24-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK24-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18
// CHECK24-NEXT:    [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK24-NEXT:    [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
// CHECK24-NEXT:    [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
// CHECK24-NEXT:    store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group !18
// CHECK24-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group !18
// CHECK24-NEXT:    [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
// CHECK24-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK24-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !18
// CHECK24-NEXT:    [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK24-NEXT:    [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
// CHECK24-NEXT:    [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
// CHECK24-NEXT:    store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group !18
// CHECK24-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !18
// CHECK24-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
// CHECK24-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
// CHECK24-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
// CHECK24-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group !18
// CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK24:       omp.body.continue:
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK24:       omp.inner.for.inc:
// CHECK24-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK24-NEXT:    [[ADD15:%.*]] = add i64 [[TMP17]], 1
// CHECK24-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK24:       omp.inner.for.end:
// CHECK24-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK24:       omp.loop.exit:
// CHECK24-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK24-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK24-NEXT:    br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK24:       .omp.final.then:
// CHECK24-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK24-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK24:       .omp.final.done:
// CHECK24-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK24-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK24:       .omp.linear.pu:
// CHECK24-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4
// CHECK24-NEXT:    store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4
// CHECK24-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A3]], align 4
// CHECK24-NEXT:    store i32 [[TMP23]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK24:       .omp.linear.pu.done:
// CHECK24-NEXT:    ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK24-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK24-NEXT:  entry:
// CHECK24-NEXT:    ret i64 0
//
//
// CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116
// CHECK24-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR0]] {
// CHECK24-NEXT:  entry:
// CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK24-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK24-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK24-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK24-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK24-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK24-NEXT:    ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK24-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] {
// CHECK24-NEXT:  entry:
// CHECK24-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[TMP:%.*]] = alloca i16, align 2
// CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[IT:%.*]] = alloca i16, align 2
// CHECK24-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK24-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK24-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK24-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK24:       cond.true:
// CHECK24-NEXT:    br label [[COND_END:%.*]]
// CHECK24:       cond.false:
// CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    br label [[COND_END]]
// CHECK24:       cond.end:
// CHECK24-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK24-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK24-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK24:       omp.inner.for.cond:
// CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK24-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
// CHECK24-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK24-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK24:       omp.inner.for.body:
// CHECK24-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK24-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK24-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK24-NEXT:    [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK24-NEXT:    store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group !21
// CHECK24-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !21
// CHECK24-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK24-NEXT:    store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !21
// CHECK24-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !21
// CHECK24-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
// CHECK24-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK24-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK24-NEXT:    store i16 [[CONV6]], i16* [[CONV]], align 2, !llvm.access.group !21
// CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK24:       omp.body.continue:
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK24:       omp.inner.for.inc:
// CHECK24-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK24-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK24-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK24:       omp.inner.for.end:
// CHECK24-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK24:       omp.loop.exit:
// CHECK24-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK24-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK24-NEXT:    br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK24:       .omp.final.then:
// CHECK24-NEXT:    store i16 22, i16* [[IT]], align 2
// CHECK24-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK24:       .omp.final.done:
// CHECK24-NEXT:    ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140
// CHECK24-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK24-NEXT:  entry:
// CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK24-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK24-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK24-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK24-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK24-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK24-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK24-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK24-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK24-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK24-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK24-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK24-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK24-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK24-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK24-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK24-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK24-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK24-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK24-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK24-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK24-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
// CHECK24-NEXT:    ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK24-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK24-NEXT:  entry:
// CHECK24-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK24-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK24-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK24-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK24-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK24-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[TMP:%.*]] = alloca i8, align 1
// CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[IT:%.*]] = alloca i8, align 1
// CHECK24-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK24-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK24-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK24-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK24-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK24-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK24-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK24-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK24-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK24-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK24-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK24-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK24-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK24-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK24-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
// CHECK24:       omp.dispatch.cond:
// CHECK24-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK24-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK24:       cond.true:
// CHECK24-NEXT:    br label [[COND_END:%.*]]
// CHECK24:       cond.false:
// CHECK24-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    br label [[COND_END]]
// CHECK24:       cond.end:
// CHECK24-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK24-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK24-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK24-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK24-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK24-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK24:       omp.dispatch.body:
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK24:       omp.inner.for.cond:
// CHECK24-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK24-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK24:       omp.inner.for.body:
// CHECK24-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK24-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK24-NEXT:    [[CONV:%.*]] = trunc i32 [[SUB]] to i8
// CHECK24-NEXT:    store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group !24
// CHECK24-NEXT:    [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK24-NEXT:    store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK24-NEXT:    [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    [[CONV7:%.*]] = fpext float [[TMP20]] to double
// CHECK24-NEXT:    [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
// CHECK24-NEXT:    [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
// CHECK24-NEXT:    store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK24-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    [[CONV11:%.*]] = fpext float [[TMP21]] to double
// CHECK24-NEXT:    [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
// CHECK24-NEXT:    [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
// CHECK24-NEXT:    store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK24-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
// CHECK24-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
// CHECK24-NEXT:    [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK24-NEXT:    store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group !24
// CHECK24-NEXT:    [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK24-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
// CHECK24-NEXT:    [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
// CHECK24-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
// CHECK24-NEXT:    [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK24-NEXT:    store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group !24
// CHECK24-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK24-NEXT:    [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK24-NEXT:    store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK24-NEXT:    [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
// CHECK24-NEXT:    [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
// CHECK24-NEXT:    [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
// CHECK24-NEXT:    store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK24:       omp.body.continue:
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK24:       omp.inner.for.inc:
// CHECK24-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK24-NEXT:    store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK24:       omp.inner.for.end:
// CHECK24-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
// CHECK24:       omp.dispatch.inc:
// CHECK24-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK24-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK24-NEXT:    [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK24-NEXT:    store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
// CHECK24-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK24-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK24-NEXT:    store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
// CHECK24-NEXT:    br label [[OMP_DISPATCH_COND]]
// CHECK24:       omp.dispatch.end:
// CHECK24-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK24-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT:    [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0
// CHECK24-NEXT:    br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK24:       .omp.final.then:
// CHECK24-NEXT:    store i8 96, i8* [[IT]], align 1
// CHECK24-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK24:       .omp.final.done:
// CHECK24-NEXT:    ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK24-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK24-NEXT:  entry:
// CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK24-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK24-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK24-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK24-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK24-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK24-NEXT:    [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK24-NEXT:    store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK24-NEXT:    [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 1
// CHECK24-NEXT:    [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK24-NEXT:    store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK24-NEXT:    [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK24-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK24-NEXT:    ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK24-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK24-NEXT:  entry:
// CHECK24-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK24-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK24-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK24-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK24-NEXT:    ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
// CHECK24-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK24-NEXT:  entry:
// CHECK24-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK24-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK24-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK24-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK24-NEXT:    [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK24-NEXT:    [[TMP3:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK24-NEXT:    [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK24-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[TMP5]], i32* [[B_CASTED]], align 4
// CHECK24-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK24-NEXT:    [[TMP7:%.*]] = load i8, i8* [[CONV]], align 1
// CHECK24-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK24-NEXT:    [[CONV3:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK24-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK24-NEXT:    store i8 [[FROMBOOL]], i8* [[CONV3]], align 1
// CHECK24-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK24-NEXT:    [[TMP9:%.*]] = load i8, i8* [[CONV]], align 1
// CHECK24-NEXT:    [[TOBOOL4:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK24-NEXT:    br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK24:       omp_if.then:
// CHECK24-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]])
// CHECK24-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK24:       omp_if.else:
// CHECK24-NEXT:    call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK24-NEXT:    store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK24-NEXT:    store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK24-NEXT:    call void @.omp_outlined..5(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) #[[ATTR2:[0-9]+]]
// CHECK24-NEXT:    call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK24-NEXT:    br label [[OMP_IF_END]]
// CHECK24:       omp_if.end:
// CHECK24-NEXT:    ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK24-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK24-NEXT:  entry:
// CHECK24-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK24-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK24-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK24-NEXT:    store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK24-NEXT:    [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK24-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK24-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK24-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT:    [[TMP4:%.*]] = load i8, i8* [[CONV]], align 1
// CHECK24-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1
// CHECK24-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK24:       omp_if.then:
// CHECK24-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK24-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK24-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3
// CHECK24-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK24:       cond.true:
// CHECK24-NEXT:    br label [[COND_END:%.*]]
// CHECK24:       cond.false:
// CHECK24-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    br label [[COND_END]]
// CHECK24:       cond.end:
// CHECK24-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK24-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK24-NEXT:    store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK24:       omp.inner.for.cond:
// CHECK24-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK24-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !27
// CHECK24-NEXT:    [[CMP3:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]]
// CHECK24-NEXT:    br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK24:       omp.inner.for.body:
// CHECK24-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK24-NEXT:    [[MUL:%.*]] = mul i64 [[TMP12]], 400
// CHECK24-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK24-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !27
// CHECK24-NEXT:    [[TMP13:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group !27
// CHECK24-NEXT:    [[CONV4:%.*]] = sitofp i32 [[TMP13]] to double
// CHECK24-NEXT:    [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
// CHECK24-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK24-NEXT:    store double [[ADD]], double* [[A]], align 4, !nontemporal !28, !llvm.access.group !27
// CHECK24-NEXT:    [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK24-NEXT:    [[TMP14:%.*]] = load double, double* [[A5]], align 4, !nontemporal !28, !llvm.access.group !27
// CHECK24-NEXT:    [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
// CHECK24-NEXT:    store double [[INC]], double* [[A5]], align 4, !nontemporal !28, !llvm.access.group !27
// CHECK24-NEXT:    [[CONV6:%.*]] = fptosi double [[INC]] to i16
// CHECK24-NEXT:    [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK24-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP15]]
// CHECK24-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK24-NEXT:    store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group !27
// CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK24:       omp.body.continue:
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK24:       omp.inner.for.inc:
// CHECK24-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK24-NEXT:    [[ADD8:%.*]] = add i64 [[TMP16]], 1
// CHECK24-NEXT:    store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !27
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
// CHECK24:       omp.inner.for.end:
// CHECK24-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK24:       omp_if.else:
// CHECK24-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK24-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK24-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    [[CMP9:%.*]] = icmp ugt i64 [[TMP19]], 3
// CHECK24-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
// CHECK24:       cond.true10:
// CHECK24-NEXT:    br label [[COND_END12:%.*]]
// CHECK24:       cond.false11:
// CHECK24-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    br label [[COND_END12]]
// CHECK24:       cond.end12:
// CHECK24-NEXT:    [[COND13:%.*]] = phi i64 [ 3, [[COND_TRUE10]] ], [ [[TMP20]], [[COND_FALSE11]] ]
// CHECK24-NEXT:    store i64 [[COND13]], i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK24-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND14:%.*]]
// CHECK24:       omp.inner.for.cond14:
// CHECK24-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK24-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    [[CMP15:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
// CHECK24-NEXT:    br i1 [[CMP15]], label [[OMP_INNER_FOR_BODY16:%.*]], label [[OMP_INNER_FOR_END30:%.*]]
// CHECK24:       omp.inner.for.body16:
// CHECK24-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK24-NEXT:    [[MUL17:%.*]] = mul i64 [[TMP24]], 400
// CHECK24-NEXT:    [[SUB18:%.*]] = sub i64 2000, [[MUL17]]
// CHECK24-NEXT:    store i64 [[SUB18]], i64* [[IT]], align 8
// CHECK24-NEXT:    [[TMP25:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK24-NEXT:    [[CONV19:%.*]] = sitofp i32 [[TMP25]] to double
// CHECK24-NEXT:    [[ADD20:%.*]] = fadd double [[CONV19]], 1.500000e+00
// CHECK24-NEXT:    [[A21:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK24-NEXT:    store double [[ADD20]], double* [[A21]], align 4
// CHECK24-NEXT:    [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK24-NEXT:    [[TMP26:%.*]] = load double, double* [[A22]], align 4
// CHECK24-NEXT:    [[INC23:%.*]] = fadd double [[TMP26]], 1.000000e+00
// CHECK24-NEXT:    store double [[INC23]], double* [[A22]], align 4
// CHECK24-NEXT:    [[CONV24:%.*]] = fptosi double [[INC23]] to i16
// CHECK24-NEXT:    [[TMP27:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK24-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP27]]
// CHECK24-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1
// CHECK24-NEXT:    store i16 [[CONV24]], i16* [[ARRAYIDX26]], align 2
// CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE27:%.*]]
// CHECK24:       omp.body.continue27:
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC28:%.*]]
// CHECK24:       omp.inner.for.inc28:
// CHECK24-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK24-NEXT:    [[ADD29:%.*]] = add i64 [[TMP28]], 1
// CHECK24-NEXT:    store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8
// CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND14]], !llvm.loop [[LOOP31:![0-9]+]]
// CHECK24:       omp.inner.for.end30:
// CHECK24-NEXT:    br label [[OMP_IF_END]]
// CHECK24:       omp_if.end:
// CHECK24-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK24:       omp.loop.exit:
// CHECK24-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// CHECK24-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
// CHECK24-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK24-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK24:       .omp.final.then:
// CHECK24-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK24-NEXT:    br label [[DOTOMP_FINAL_DONE]]
// CHECK24:       .omp.final.done:
// CHECK24-NEXT:    ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK24-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK24-NEXT:  entry:
// CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK24-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK24-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK24-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK24-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK24-NEXT:    [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK24-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK24-NEXT:    store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK24-NEXT:    [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK24-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK24-NEXT:    ret void
//
//
// CHECK24-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK24-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK24-NEXT:  entry:
// CHECK24-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK24-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK24-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK24-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK24-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK24-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK24-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK24-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK24-NEXT:    store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK24-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK24-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK24-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK24-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK24-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK24-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK24-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK24-NEXT:    call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK24-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK24-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK24:       cond.true:
// CHECK24-NEXT:    br label [[COND_END:%.*]]
// CHECK24:       cond.false:
// CHECK24-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK24-NEXT:    br label [[COND_END]]
// CHECK24:       cond.end:
// CHECK24-NEXT:    [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK24-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
23893 // CHECK24-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
23894 // CHECK24-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
23895 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
23896 // CHECK24:       omp.inner.for.cond:
23897 // CHECK24-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33
23898 // CHECK24-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !33
23899 // CHECK24-NEXT:    [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
23900 // CHECK24-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
23901 // CHECK24:       omp.inner.for.body:
23902 // CHECK24-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33
23903 // CHECK24-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
23904 // CHECK24-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
23905 // CHECK24-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !33
23906 // CHECK24-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !33
23907 // CHECK24-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
23908 // CHECK24-NEXT:    store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group !33
23909 // CHECK24-NEXT:    [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group !33
23910 // CHECK24-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
23911 // CHECK24-NEXT:    [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
23912 // CHECK24-NEXT:    [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
23913 // CHECK24-NEXT:    store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group !33
23914 // CHECK24-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
23915 // CHECK24-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !33
23916 // CHECK24-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
23917 // CHECK24-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !33
23918 // CHECK24-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
23919 // CHECK24:       omp.body.continue:
23920 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
23921 // CHECK24:       omp.inner.for.inc:
23922 // CHECK24-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33
23923 // CHECK24-NEXT:    [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
23924 // CHECK24-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !33
23925 // CHECK24-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
23926 // CHECK24:       omp.inner.for.end:
23927 // CHECK24-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
23928 // CHECK24:       omp.loop.exit:
23929 // CHECK24-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
23930 // CHECK24-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
23931 // CHECK24-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
23932 // CHECK24-NEXT:    br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
23933 // CHECK24:       .omp.final.then:
23934 // CHECK24-NEXT:    store i64 11, i64* [[I]], align 8
23935 // CHECK24-NEXT:    br label [[DOTOMP_FINAL_DONE]]
23936 // CHECK24:       .omp.final.done:
23937 // CHECK24-NEXT:    ret void
23938 //
23939 //
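// The CHECK25 assertions that follow come from a host-only compilation
// (presumably one of the -fopenmp-simd RUN lines above), so no offloading
// machinery is emitted and each target region is lowered to an inline simd
// loop. A minimal sketch of the helper these first checks match, reconstructed
// from the IR rather than quoted from the source:
//
//   long long get_val() { return 0; }   // matches the bare 'ret i64 0' below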
23940 // CHECK25-LABEL: define {{[^@]+}}@_Z7get_valv
23941 // CHECK25-SAME: () #[[ATTR0:[0-9]+]] {
23942 // CHECK25-NEXT:  entry:
23943 // CHECK25-NEXT:    ret i64 0
23944 //
23945 //
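// @_Z3fooi is checked next. Because this is the simd-only lowering, each of
// foo's loops appears as a plain omp.inner.for.* nest annotated with
// !llvm.access.group / !llvm.loop metadata instead of calls into the OpenMP
// runtime. A hedged reconstruction of the first loop from its bounds
// (lb 0, ub 5, 'i' finalized to 33), not a verbatim quote of the source:
//
//   for (int i = 3; i < 32; i += 5)
//     ;                             // 6 iterations; final i == 3 + 6*5 == 33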
23946 // CHECK25-LABEL: define {{[^@]+}}@_Z3fooi
23947 // CHECK25-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
23948 // CHECK25-NEXT:  entry:
23949 // CHECK25-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
23950 // CHECK25-NEXT:    [[A:%.*]] = alloca i32, align 4
23951 // CHECK25-NEXT:    [[AA:%.*]] = alloca i16, align 2
23952 // CHECK25-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
23953 // CHECK25-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
23954 // CHECK25-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
23955 // CHECK25-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
23956 // CHECK25-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
23957 // CHECK25-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
23958 // CHECK25-NEXT:    [[TMP:%.*]] = alloca i32, align 4
23959 // CHECK25-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
23960 // CHECK25-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
23961 // CHECK25-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
23962 // CHECK25-NEXT:    [[I:%.*]] = alloca i32, align 4
23963 // CHECK25-NEXT:    [[K:%.*]] = alloca i64, align 8
23964 // CHECK25-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
23965 // CHECK25-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
23966 // CHECK25-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
23967 // CHECK25-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
23968 // CHECK25-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
23969 // CHECK25-NEXT:    [[I7:%.*]] = alloca i32, align 4
23970 // CHECK25-NEXT:    [[K8:%.*]] = alloca i64, align 8
23971 // CHECK25-NEXT:    [[LIN:%.*]] = alloca i32, align 4
23972 // CHECK25-NEXT:    [[_TMP20:%.*]] = alloca i64, align 8
23973 // CHECK25-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
23974 // CHECK25-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
23975 // CHECK25-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
23976 // CHECK25-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
23977 // CHECK25-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
23978 // CHECK25-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
23979 // CHECK25-NEXT:    [[IT:%.*]] = alloca i64, align 8
23980 // CHECK25-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
23981 // CHECK25-NEXT:    [[A28:%.*]] = alloca i32, align 4
23982 // CHECK25-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
23983 // CHECK25-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
23984 // CHECK25-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
23985 // CHECK25-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
23986 // CHECK25-NEXT:    [[IT53:%.*]] = alloca i16, align 2
23987 // CHECK25-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
23988 // CHECK25-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
23989 // CHECK25-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
23990 // CHECK25-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
23991 // CHECK25-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
23992 // CHECK25-NEXT:    [[IT72:%.*]] = alloca i8, align 1
23993 // CHECK25-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
23994 // CHECK25-NEXT:    store i32 0, i32* [[A]], align 4
23995 // CHECK25-NEXT:    store i16 0, i16* [[AA]], align 2
23996 // CHECK25-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
23997 // CHECK25-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
23998 // CHECK25-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
23999 // CHECK25-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
24000 // CHECK25-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
24001 // CHECK25-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
24002 // CHECK25-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
24003 // CHECK25-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
24004 // CHECK25-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
24005 // CHECK25-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
24006 // CHECK25-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
24007 // CHECK25-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24008 // CHECK25-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
24009 // CHECK25-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24010 // CHECK25-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
24011 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24012 // CHECK25:       omp.inner.for.cond:
24013 // CHECK25-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
24014 // CHECK25-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
24015 // CHECK25-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
24016 // CHECK25-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24017 // CHECK25:       omp.inner.for.body:
24018 // CHECK25-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
24019 // CHECK25-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5
24020 // CHECK25-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
24021 // CHECK25-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
24022 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24023 // CHECK25:       omp.body.continue:
24024 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24025 // CHECK25:       omp.inner.for.inc:
24026 // CHECK25-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
24027 // CHECK25-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
24028 // CHECK25-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
24029 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
24030 // CHECK25:       omp.inner.for.end:
24031 // CHECK25-NEXT:    store i32 33, i32* [[I]], align 4
24032 // CHECK25-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
24033 // CHECK25-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
24034 // CHECK25-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
24035 // CHECK25-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
24036 // CHECK25-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
24037 // CHECK25-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4
24038 // CHECK25-NEXT:    [[TMP12:%.*]] = load i64, i64* [[K]], align 8
24039 // CHECK25-NEXT:    store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8
24040 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
24041 // CHECK25:       omp.inner.for.cond9:
24042 // CHECK25-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
24043 // CHECK25-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6
24044 // CHECK25-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
24045 // CHECK25-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
24046 // CHECK25:       omp.inner.for.body11:
24047 // CHECK25-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
24048 // CHECK25-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
24049 // CHECK25-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
24050 // CHECK25-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6
24051 // CHECK25-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6
24052 // CHECK25-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
24053 // CHECK25-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
24054 // CHECK25-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
24055 // CHECK25-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
24056 // CHECK25-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6
24057 // CHECK25-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6
24058 // CHECK25-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
24059 // CHECK25-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6
24060 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
24061 // CHECK25:       omp.body.continue16:
24062 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
24063 // CHECK25:       omp.inner.for.inc17:
24064 // CHECK25-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
24065 // CHECK25-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
24066 // CHECK25-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
24067 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
24068 // CHECK25:       omp.inner.for.end19:
24069 // CHECK25-NEXT:    store i32 1, i32* [[I7]], align 4
24070 // CHECK25-NEXT:    [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
24071 // CHECK25-NEXT:    store i64 [[TMP20]], i64* [[K]], align 8
24072 // CHECK25-NEXT:    store i32 12, i32* [[LIN]], align 4
24073 // CHECK25-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
24074 // CHECK25-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
24075 // CHECK25-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
24076 // CHECK25-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
24077 // CHECK25-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
24078 // CHECK25-NEXT:    store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
24079 // CHECK25-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A]], align 4
24080 // CHECK25-NEXT:    store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
24081 // CHECK25-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
24082 // CHECK25-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
24083 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
24084 // CHECK25:       omp.inner.for.cond29:
24085 // CHECK25-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
24086 // CHECK25-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9
24087 // CHECK25-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
24088 // CHECK25-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
24089 // CHECK25:       omp.inner.for.body31:
24090 // CHECK25-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
24091 // CHECK25-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP26]], 400
24092 // CHECK25-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
24093 // CHECK25-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9
24094 // CHECK25-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9
24095 // CHECK25-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
24096 // CHECK25-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
24097 // CHECK25-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
24098 // CHECK25-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
24099 // CHECK25-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
24100 // CHECK25-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
24101 // CHECK25-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9
24102 // CHECK25-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9
24103 // CHECK25-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
24104 // CHECK25-NEXT:    [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
24105 // CHECK25-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
24106 // CHECK25-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
24107 // CHECK25-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
24108 // CHECK25-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
24109 // CHECK25-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9
24110 // CHECK25-NEXT:    [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
24111 // CHECK25-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
24112 // CHECK25-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
24113 // CHECK25-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
24114 // CHECK25-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9
24115 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
24116 // CHECK25:       omp.body.continue45:
24117 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
24118 // CHECK25:       omp.inner.for.inc46:
24119 // CHECK25-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
24120 // CHECK25-NEXT:    [[ADD47:%.*]] = add i64 [[TMP34]], 1
24121 // CHECK25-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
24122 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
24123 // CHECK25:       omp.inner.for.end48:
24124 // CHECK25-NEXT:    store i64 400, i64* [[IT]], align 8
24125 // CHECK25-NEXT:    [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
24126 // CHECK25-NEXT:    store i32 [[TMP35]], i32* [[LIN]], align 4
24127 // CHECK25-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
24128 // CHECK25-NEXT:    store i32 [[TMP36]], i32* [[A]], align 4
24129 // CHECK25-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
24130 // CHECK25-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
24131 // CHECK25-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
24132 // CHECK25-NEXT:    store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
24133 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
24134 // CHECK25:       omp.inner.for.cond54:
24135 // CHECK25-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
24136 // CHECK25-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12
24137 // CHECK25-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
24138 // CHECK25-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
24139 // CHECK25:       omp.inner.for.body56:
24140 // CHECK25-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
24141 // CHECK25-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
24142 // CHECK25-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
24143 // CHECK25-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
24144 // CHECK25-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12
24145 // CHECK25-NEXT:    [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
24146 // CHECK25-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
24147 // CHECK25-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12
24148 // CHECK25-NEXT:    [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
24149 // CHECK25-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
24150 // CHECK25-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
24151 // CHECK25-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
24152 // CHECK25-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12
24153 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
24154 // CHECK25:       omp.body.continue64:
24155 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
24156 // CHECK25:       omp.inner.for.inc65:
24157 // CHECK25-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
24158 // CHECK25-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
24159 // CHECK25-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
24160 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
24161 // CHECK25:       omp.inner.for.end67:
24162 // CHECK25-NEXT:    store i16 22, i16* [[IT53]], align 2
24163 // CHECK25-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
24164 // CHECK25-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
24165 // CHECK25-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
24166 // CHECK25-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
24167 // CHECK25-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
24168 // CHECK25-NEXT:    store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
24169 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
24170 // CHECK25:       omp.inner.for.cond73:
24171 // CHECK25-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
24172 // CHECK25-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15
24173 // CHECK25-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
24174 // CHECK25-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
24175 // CHECK25:       omp.inner.for.body75:
24176 // CHECK25-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
24177 // CHECK25-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
24178 // CHECK25-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
24179 // CHECK25-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
24180 // CHECK25-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15
24181 // CHECK25-NEXT:    [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
24182 // CHECK25-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
24183 // CHECK25-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15
24184 // CHECK25-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
24185 // CHECK25-NEXT:    [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
24186 // CHECK25-NEXT:    [[CONV80:%.*]] = fpext float [[TMP50]] to double
24187 // CHECK25-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
24188 // CHECK25-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
24189 // CHECK25-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
24190 // CHECK25-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
24191 // CHECK25-NEXT:    [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
24192 // CHECK25-NEXT:    [[CONV84:%.*]] = fpext float [[TMP51]] to double
24193 // CHECK25-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
24194 // CHECK25-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
24195 // CHECK25-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
24196 // CHECK25-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
24197 // CHECK25-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2
24198 // CHECK25-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
24199 // CHECK25-NEXT:    [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00
24200 // CHECK25-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
24201 // CHECK25-NEXT:    [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]]
24202 // CHECK25-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]]
24203 // CHECK25-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3
24204 // CHECK25-NEXT:    [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
24205 // CHECK25-NEXT:    [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00
24206 // CHECK25-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
24207 // CHECK25-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
24208 // CHECK25-NEXT:    [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
24209 // CHECK25-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1
24210 // CHECK25-NEXT:    store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15
24211 // CHECK25-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
24212 // CHECK25-NEXT:    [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
24213 // CHECK25-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP56]] to i32
24214 // CHECK25-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
24215 // CHECK25-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
24216 // CHECK25-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15
24217 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
24218 // CHECK25:       omp.body.continue97:
24219 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
24220 // CHECK25:       omp.inner.for.inc98:
24221 // CHECK25-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
24222 // CHECK25-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1
24223 // CHECK25-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
24224 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]]
24225 // CHECK25:       omp.inner.for.end100:
24226 // CHECK25-NEXT:    store i8 96, i8* [[IT72]], align 1
24227 // CHECK25-NEXT:    [[TMP58:%.*]] = load i32, i32* [[A]], align 4
24228 // CHECK25-NEXT:    [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
24229 // CHECK25-NEXT:    call void @llvm.stackrestore(i8* [[TMP59]])
24230 // CHECK25-NEXT:    ret i32 [[TMP58]]
24231 //
24232 //
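// @_Z3bari simply accumulates the results of the four callees checked in this
// file. Assumed shape, inferred from the call sequence in the checks below:
//
//   int bar(int n) {
//     int a = 0;
//     a += foo(n);
//     S1 s;                    // the IR allocas one %struct.S1
//     a += s.r1(n);
//     a += fstatic(n);
//     a += ftemplate<int>(n);
//     return a;
//   }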
24233 // CHECK25-LABEL: define {{[^@]+}}@_Z3bari
24234 // CHECK25-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
24235 // CHECK25-NEXT:  entry:
24236 // CHECK25-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24237 // CHECK25-NEXT:    [[A:%.*]] = alloca i32, align 4
24238 // CHECK25-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
24239 // CHECK25-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24240 // CHECK25-NEXT:    store i32 0, i32* [[A]], align 4
24241 // CHECK25-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
24242 // CHECK25-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
24243 // CHECK25-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
24244 // CHECK25-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
24245 // CHECK25-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
24246 // CHECK25-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
24247 // CHECK25-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]])
24248 // CHECK25-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
24249 // CHECK25-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
24250 // CHECK25-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
24251 // CHECK25-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
24252 // CHECK25-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
24253 // CHECK25-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
24254 // CHECK25-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
24255 // CHECK25-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
24256 // CHECK25-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
24257 // CHECK25-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
24258 // CHECK25-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
24259 // CHECK25-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
24260 // CHECK25-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
24261 // CHECK25-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
24262 // CHECK25-NEXT:    ret i32 [[TMP8]]
24263 //
24264 //
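// S1::r1 pairs the member double 'a' with a 2 x n VLA of shorts. The checked
// loop walks the iteration space 2000, 1600, 1200, 800 (four trips of -400,
// 'it' finalized to 400) and stores the truncated result into element [1][1]
// of the VLA, which the function then returns plus 'b'. Reconstructed shape
// (an inference from the IR, not the original source):
//
//   int r1(int n) {
//     int b = n + 1;
//     short c[2][n];                         // the 'mul nuw i64 2, n' VLA
//     for (unsigned long long it = 2000; it > 600; it -= 400) {
//       a = b + 1.5;                         // sitofp + fadd in the body
//       c[1][1] = ++a;                       // fptosi of the incremented 'a'
//     }
//     return c[1][1] + b;
//   }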
24265 // CHECK25-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
24266 // CHECK25-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
24267 // CHECK25-NEXT:  entry:
24268 // CHECK25-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
24269 // CHECK25-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24270 // CHECK25-NEXT:    [[B:%.*]] = alloca i32, align 4
24271 // CHECK25-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
24272 // CHECK25-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
24273 // CHECK25-NEXT:    [[TMP:%.*]] = alloca i64, align 8
24274 // CHECK25-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
24275 // CHECK25-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
24276 // CHECK25-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
24277 // CHECK25-NEXT:    [[IT:%.*]] = alloca i64, align 8
24278 // CHECK25-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
24279 // CHECK25-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24280 // CHECK25-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
24281 // CHECK25-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
24282 // CHECK25-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
24283 // CHECK25-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
24284 // CHECK25-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
24285 // CHECK25-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
24286 // CHECK25-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
24287 // CHECK25-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
24288 // CHECK25-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
24289 // CHECK25-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
24290 // CHECK25-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
24291 // CHECK25-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
24292 // CHECK25-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
24293 // CHECK25-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
24294 // CHECK25-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
24295 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24296 // CHECK25:       omp.inner.for.cond:
24297 // CHECK25-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
24298 // CHECK25-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
24299 // CHECK25-NEXT:    [[CMP:%.*]] = icmp ule i64 [[TMP6]], [[TMP7]]
24300 // CHECK25-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24301 // CHECK25:       omp.inner.for.body:
24302 // CHECK25-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
24303 // CHECK25-NEXT:    [[MUL:%.*]] = mul i64 [[TMP8]], 400
24304 // CHECK25-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
24305 // CHECK25-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
24306 // CHECK25-NEXT:    [[TMP9:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
24307 // CHECK25-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP9]] to double
24308 // CHECK25-NEXT:    [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
24309 // CHECK25-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
24310 // CHECK25-NEXT:    store double [[ADD2]], double* [[A]], align 8, !llvm.access.group !18
24311 // CHECK25-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
24312 // CHECK25-NEXT:    [[TMP10:%.*]] = load double, double* [[A3]], align 8, !llvm.access.group !18
24313 // CHECK25-NEXT:    [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00
24314 // CHECK25-NEXT:    store double [[INC]], double* [[A3]], align 8, !llvm.access.group !18
24315 // CHECK25-NEXT:    [[CONV4:%.*]] = fptosi double [[INC]] to i16
24316 // CHECK25-NEXT:    [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]]
24317 // CHECK25-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP11]]
24318 // CHECK25-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
24319 // CHECK25-NEXT:    store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !18
24320 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24321 // CHECK25:       omp.body.continue:
24322 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24323 // CHECK25:       omp.inner.for.inc:
24324 // CHECK25-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
24325 // CHECK25-NEXT:    [[ADD6:%.*]] = add i64 [[TMP12]], 1
24326 // CHECK25-NEXT:    store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
24327 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
24328 // CHECK25:       omp.inner.for.end:
24329 // CHECK25-NEXT:    store i64 400, i64* [[IT]], align 8
24330 // CHECK25-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
24331 // CHECK25-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
24332 // CHECK25-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i64 1
24333 // CHECK25-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2
24334 // CHECK25-NEXT:    [[CONV9:%.*]] = sext i16 [[TMP14]] to i32
24335 // CHECK25-NEXT:    [[TMP15:%.*]] = load i32, i32* [[B]], align 4
24336 // CHECK25-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP15]]
24337 // CHECK25-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
24338 // CHECK25-NEXT:    call void @llvm.stackrestore(i8* [[TMP16]])
24339 // CHECK25-NEXT:    ret i32 [[ADD10]]
24340 //
24341 //
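// @_ZL7fstatici looks truncated compared to its siblings: only the loop
// bounds are materialized (ub == 429496720) before the function returns 'a'.
// That is consistent with a loop whose body is elided entirely in this
// host-only mode; the exact source loop is not recoverable from these checks
// alone, so no sketch is attempted here.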
24342 // CHECK25-LABEL: define {{[^@]+}}@_ZL7fstatici
24343 // CHECK25-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
24344 // CHECK25-NEXT:  entry:
24345 // CHECK25-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24346 // CHECK25-NEXT:    [[A:%.*]] = alloca i32, align 4
24347 // CHECK25-NEXT:    [[AA:%.*]] = alloca i16, align 2
24348 // CHECK25-NEXT:    [[AAA:%.*]] = alloca i8, align 1
24349 // CHECK25-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
24350 // CHECK25-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24351 // CHECK25-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24352 // CHECK25-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24353 // CHECK25-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24354 // CHECK25-NEXT:    store i32 0, i32* [[A]], align 4
24355 // CHECK25-NEXT:    store i16 0, i16* [[AA]], align 2
24356 // CHECK25-NEXT:    store i8 0, i8* [[AAA]], align 1
24357 // CHECK25-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24358 // CHECK25-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
24359 // CHECK25-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
24360 // CHECK25-NEXT:    ret i32 [[TMP0]]
24361 //
24362 //
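// @_Z9ftemplateIiET_i closes the CHECK25 group. From the bounds below
// (iv*3 added to -10, ub 6, 'i' finalized to 11) the loop is presumably:
//
//   for (long long i = -10; i < 10; i += 3) {  // 7 trips; final i == 11
//     a += 1;
//     aa += 1;                                 // the i16 sext/trunc round-trip
//     b[2] += 1;
//   }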
24363 // CHECK25-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
24364 // CHECK25-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
24365 // CHECK25-NEXT:  entry:
24366 // CHECK25-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24367 // CHECK25-NEXT:    [[A:%.*]] = alloca i32, align 4
24368 // CHECK25-NEXT:    [[AA:%.*]] = alloca i16, align 2
24369 // CHECK25-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
24370 // CHECK25-NEXT:    [[TMP:%.*]] = alloca i64, align 8
24371 // CHECK25-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
24372 // CHECK25-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
24373 // CHECK25-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
24374 // CHECK25-NEXT:    [[I:%.*]] = alloca i64, align 8
24375 // CHECK25-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24376 // CHECK25-NEXT:    store i32 0, i32* [[A]], align 4
24377 // CHECK25-NEXT:    store i16 0, i16* [[AA]], align 2
24378 // CHECK25-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
24379 // CHECK25-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
24380 // CHECK25-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
24381 // CHECK25-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
24382 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24383 // CHECK25:       omp.inner.for.cond:
24384 // CHECK25-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
24385 // CHECK25-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !21
24386 // CHECK25-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
24387 // CHECK25-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24388 // CHECK25:       omp.inner.for.body:
24389 // CHECK25-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
24390 // CHECK25-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
24391 // CHECK25-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
24392 // CHECK25-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !21
24393 // CHECK25-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !21
24394 // CHECK25-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
24395 // CHECK25-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !21
24396 // CHECK25-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !21
24397 // CHECK25-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
24398 // CHECK25-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
24399 // CHECK25-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
24400 // CHECK25-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !21
24401 // CHECK25-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
24402 // CHECK25-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
24403 // CHECK25-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
24404 // CHECK25-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
24405 // CHECK25-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24406 // CHECK25:       omp.body.continue:
24407 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24408 // CHECK25:       omp.inner.for.inc:
24409 // CHECK25-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
24410 // CHECK25-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
24411 // CHECK25-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
24412 // CHECK25-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
24413 // CHECK25:       omp.inner.for.end:
24414 // CHECK25-NEXT:    store i64 11, i64* [[I]], align 8
24415 // CHECK25-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
24416 // CHECK25-NEXT:    ret i32 [[TMP8]]
24417 //
24418 //
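// The CHECK26 prefix re-runs the same host-only expectations for a second
// simd RUN configuration; its assertions are structurally identical to the
// CHECK25 ones above (same blocks, same access-group and loop metadata), so
// the sketches above apply here as well.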
24419 // CHECK26-LABEL: define {{[^@]+}}@_Z7get_valv
24420 // CHECK26-SAME: () #[[ATTR0:[0-9]+]] {
24421 // CHECK26-NEXT:  entry:
24422 // CHECK26-NEXT:    ret i64 0
24423 //
24424 //
24425 // CHECK26-LABEL: define {{[^@]+}}@_Z3fooi
24426 // CHECK26-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
24427 // CHECK26-NEXT:  entry:
24428 // CHECK26-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24429 // CHECK26-NEXT:    [[A:%.*]] = alloca i32, align 4
24430 // CHECK26-NEXT:    [[AA:%.*]] = alloca i16, align 2
24431 // CHECK26-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
24432 // CHECK26-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
24433 // CHECK26-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
24434 // CHECK26-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
24435 // CHECK26-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
24436 // CHECK26-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
24437 // CHECK26-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24438 // CHECK26-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24439 // CHECK26-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24440 // CHECK26-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24441 // CHECK26-NEXT:    [[I:%.*]] = alloca i32, align 4
24442 // CHECK26-NEXT:    [[K:%.*]] = alloca i64, align 8
24443 // CHECK26-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
24444 // CHECK26-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
24445 // CHECK26-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
24446 // CHECK26-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
24447 // CHECK26-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
24448 // CHECK26-NEXT:    [[I7:%.*]] = alloca i32, align 4
24449 // CHECK26-NEXT:    [[K8:%.*]] = alloca i64, align 8
24450 // CHECK26-NEXT:    [[LIN:%.*]] = alloca i32, align 4
24451 // CHECK26-NEXT:    [[_TMP20:%.*]] = alloca i64, align 8
24452 // CHECK26-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
24453 // CHECK26-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
24454 // CHECK26-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
24455 // CHECK26-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
24456 // CHECK26-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
24457 // CHECK26-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
24458 // CHECK26-NEXT:    [[IT:%.*]] = alloca i64, align 8
24459 // CHECK26-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
24460 // CHECK26-NEXT:    [[A28:%.*]] = alloca i32, align 4
24461 // CHECK26-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
24462 // CHECK26-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
24463 // CHECK26-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
24464 // CHECK26-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
24465 // CHECK26-NEXT:    [[IT53:%.*]] = alloca i16, align 2
24466 // CHECK26-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24467 // CHECK26-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
24468 // CHECK26-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
24469 // CHECK26-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
24470 // CHECK26-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
24471 // CHECK26-NEXT:    [[IT72:%.*]] = alloca i8, align 1
24472 // CHECK26-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24473 // CHECK26-NEXT:    store i32 0, i32* [[A]], align 4
24474 // CHECK26-NEXT:    store i16 0, i16* [[AA]], align 2
24475 // CHECK26-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
24476 // CHECK26-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
24477 // CHECK26-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
24478 // CHECK26-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
24479 // CHECK26-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
24480 // CHECK26-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
24481 // CHECK26-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
24482 // CHECK26-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
24483 // CHECK26-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
24484 // CHECK26-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
24485 // CHECK26-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
24486 // CHECK26-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24487 // CHECK26-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
24488 // CHECK26-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24489 // CHECK26-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
24490 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24491 // CHECK26:       omp.inner.for.cond:
24492 // CHECK26-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
24493 // CHECK26-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
24494 // CHECK26-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
24495 // CHECK26-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24496 // CHECK26:       omp.inner.for.body:
24497 // CHECK26-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
24498 // CHECK26-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5
24499 // CHECK26-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
24500 // CHECK26-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
24501 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24502 // CHECK26:       omp.body.continue:
24503 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24504 // CHECK26:       omp.inner.for.inc:
24505 // CHECK26-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
24506 // CHECK26-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
24507 // CHECK26-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
24508 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
24509 // CHECK26:       omp.inner.for.end:
24510 // CHECK26-NEXT:    store i32 33, i32* [[I]], align 4
24511 // CHECK26-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
24512 // CHECK26-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
24513 // CHECK26-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
24514 // CHECK26-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
24515 // CHECK26-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
24516 // CHECK26-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4
24517 // CHECK26-NEXT:    [[TMP12:%.*]] = load i64, i64* [[K]], align 8
24518 // CHECK26-NEXT:    store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8
24519 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
24520 // CHECK26:       omp.inner.for.cond9:
24521 // CHECK26-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
24522 // CHECK26-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6
24523 // CHECK26-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
24524 // CHECK26-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
24525 // CHECK26:       omp.inner.for.body11:
24526 // CHECK26-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
24527 // CHECK26-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
24528 // CHECK26-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
24529 // CHECK26-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6
24530 // CHECK26-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6
24531 // CHECK26-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
24532 // CHECK26-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
24533 // CHECK26-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
24534 // CHECK26-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
24535 // CHECK26-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6
24536 // CHECK26-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6
24537 // CHECK26-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
24538 // CHECK26-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6
24539 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
24540 // CHECK26:       omp.body.continue16:
24541 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
24542 // CHECK26:       omp.inner.for.inc17:
24543 // CHECK26-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
24544 // CHECK26-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
24545 // CHECK26-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
24546 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
24547 // CHECK26:       omp.inner.for.end19:
24548 // CHECK26-NEXT:    store i32 1, i32* [[I7]], align 4
24549 // CHECK26-NEXT:    [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
24550 // CHECK26-NEXT:    store i64 [[TMP20]], i64* [[K]], align 8
24551 // CHECK26-NEXT:    store i32 12, i32* [[LIN]], align 4
24552 // CHECK26-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
24553 // CHECK26-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
24554 // CHECK26-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
24555 // CHECK26-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
24556 // CHECK26-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
24557 // CHECK26-NEXT:    store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
24558 // CHECK26-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A]], align 4
24559 // CHECK26-NEXT:    store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
24560 // CHECK26-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
24561 // CHECK26-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
24562 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
24563 // CHECK26:       omp.inner.for.cond29:
24564 // CHECK26-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
24565 // CHECK26-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9
24566 // CHECK26-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
24567 // CHECK26-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
24568 // CHECK26:       omp.inner.for.body31:
24569 // CHECK26-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
24570 // CHECK26-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP26]], 400
24571 // CHECK26-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
24572 // CHECK26-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9
24573 // CHECK26-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9
24574 // CHECK26-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
24575 // CHECK26-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
24576 // CHECK26-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
24577 // CHECK26-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
24578 // CHECK26-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
24579 // CHECK26-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
24580 // CHECK26-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9
24581 // CHECK26-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9
24582 // CHECK26-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
24583 // CHECK26-NEXT:    [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
24584 // CHECK26-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
24585 // CHECK26-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
24586 // CHECK26-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
24587 // CHECK26-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
24588 // CHECK26-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9
24589 // CHECK26-NEXT:    [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
24590 // CHECK26-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
24591 // CHECK26-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
24592 // CHECK26-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
24593 // CHECK26-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9
24594 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
24595 // CHECK26:       omp.body.continue45:
24596 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
24597 // CHECK26:       omp.inner.for.inc46:
24598 // CHECK26-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
24599 // CHECK26-NEXT:    [[ADD47:%.*]] = add i64 [[TMP34]], 1
24600 // CHECK26-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
24601 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
24602 // CHECK26:       omp.inner.for.end48:
24603 // CHECK26-NEXT:    store i64 400, i64* [[IT]], align 8
24604 // CHECK26-NEXT:    [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
24605 // CHECK26-NEXT:    store i32 [[TMP35]], i32* [[LIN]], align 4
24606 // CHECK26-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
24607 // CHECK26-NEXT:    store i32 [[TMP36]], i32* [[A]], align 4
24608 // CHECK26-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
24609 // CHECK26-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
24610 // CHECK26-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
24611 // CHECK26-NEXT:    store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
24612 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
24613 // CHECK26:       omp.inner.for.cond54:
24614 // CHECK26-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
24615 // CHECK26-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12
24616 // CHECK26-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
24617 // CHECK26-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
24618 // CHECK26:       omp.inner.for.body56:
24619 // CHECK26-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
24620 // CHECK26-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
24621 // CHECK26-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
24622 // CHECK26-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
24623 // CHECK26-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12
24624 // CHECK26-NEXT:    [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
24625 // CHECK26-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
24626 // CHECK26-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12
24627 // CHECK26-NEXT:    [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
24628 // CHECK26-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
24629 // CHECK26-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
24630 // CHECK26-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
24631 // CHECK26-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12
24632 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
24633 // CHECK26:       omp.body.continue64:
24634 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
24635 // CHECK26:       omp.inner.for.inc65:
24636 // CHECK26-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
24637 // CHECK26-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
24638 // CHECK26-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
24639 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
24640 // CHECK26:       omp.inner.for.end67:
24641 // CHECK26-NEXT:    store i16 22, i16* [[IT53]], align 2
24642 // CHECK26-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
24643 // CHECK26-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
24644 // CHECK26-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
24645 // CHECK26-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
24646 // CHECK26-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
24647 // CHECK26-NEXT:    store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
24648 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
24649 // CHECK26:       omp.inner.for.cond73:
24650 // CHECK26-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
24651 // CHECK26-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15
24652 // CHECK26-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
24653 // CHECK26-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
24654 // CHECK26:       omp.inner.for.body75:
24655 // CHECK26-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
24656 // CHECK26-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
24657 // CHECK26-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
24658 // CHECK26-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
24659 // CHECK26-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15
24660 // CHECK26-NEXT:    [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
24661 // CHECK26-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
24662 // CHECK26-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15
24663 // CHECK26-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
24664 // CHECK26-NEXT:    [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
24665 // CHECK26-NEXT:    [[CONV80:%.*]] = fpext float [[TMP50]] to double
24666 // CHECK26-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
24667 // CHECK26-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
24668 // CHECK26-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
24669 // CHECK26-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
24670 // CHECK26-NEXT:    [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
24671 // CHECK26-NEXT:    [[CONV84:%.*]] = fpext float [[TMP51]] to double
24672 // CHECK26-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
24673 // CHECK26-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
24674 // CHECK26-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
24675 // CHECK26-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
24676 // CHECK26-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2
24677 // CHECK26-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
24678 // CHECK26-NEXT:    [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00
24679 // CHECK26-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
24680 // CHECK26-NEXT:    [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]]
24681 // CHECK26-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]]
24682 // CHECK26-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3
24683 // CHECK26-NEXT:    [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
24684 // CHECK26-NEXT:    [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00
24685 // CHECK26-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
24686 // CHECK26-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
24687 // CHECK26-NEXT:    [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
24688 // CHECK26-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1
24689 // CHECK26-NEXT:    store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15
24690 // CHECK26-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
24691 // CHECK26-NEXT:    [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
24692 // CHECK26-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP56]] to i32
24693 // CHECK26-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
24694 // CHECK26-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
24695 // CHECK26-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15
24696 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
24697 // CHECK26:       omp.body.continue97:
24698 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
24699 // CHECK26:       omp.inner.for.inc98:
24700 // CHECK26-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
24701 // CHECK26-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1
24702 // CHECK26-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
24703 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]]
24704 // CHECK26:       omp.inner.for.end100:
24705 // CHECK26-NEXT:    store i8 96, i8* [[IT72]], align 1
24706 // CHECK26-NEXT:    [[TMP58:%.*]] = load i32, i32* [[A]], align 4
24707 // CHECK26-NEXT:    [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
24708 // CHECK26-NEXT:    call void @llvm.stackrestore(i8* [[TMP59]])
24709 // CHECK26-NEXT:    ret i32 [[TMP58]]
24710 //
24711 //
24712 // CHECK26-LABEL: define {{[^@]+}}@_Z3bari
24713 // CHECK26-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
24714 // CHECK26-NEXT:  entry:
24715 // CHECK26-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24716 // CHECK26-NEXT:    [[A:%.*]] = alloca i32, align 4
24717 // CHECK26-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
24718 // CHECK26-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24719 // CHECK26-NEXT:    store i32 0, i32* [[A]], align 4
24720 // CHECK26-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
24721 // CHECK26-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
24722 // CHECK26-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
24723 // CHECK26-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
24724 // CHECK26-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
24725 // CHECK26-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
24726 // CHECK26-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]])
24727 // CHECK26-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
24728 // CHECK26-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
24729 // CHECK26-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
24730 // CHECK26-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
24731 // CHECK26-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
24732 // CHECK26-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
24733 // CHECK26-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
24734 // CHECK26-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
24735 // CHECK26-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
24736 // CHECK26-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
24737 // CHECK26-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
24738 // CHECK26-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
24739 // CHECK26-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
24740 // CHECK26-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
24741 // CHECK26-NEXT:    ret i32 [[TMP8]]
24742 //
24743 //
24744 // CHECK26-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
24745 // CHECK26-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
24746 // CHECK26-NEXT:  entry:
24747 // CHECK26-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
24748 // CHECK26-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24749 // CHECK26-NEXT:    [[B:%.*]] = alloca i32, align 4
24750 // CHECK26-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
24751 // CHECK26-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
24752 // CHECK26-NEXT:    [[TMP:%.*]] = alloca i64, align 8
24753 // CHECK26-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
24754 // CHECK26-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
24755 // CHECK26-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
24756 // CHECK26-NEXT:    [[IT:%.*]] = alloca i64, align 8
24757 // CHECK26-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
24758 // CHECK26-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24759 // CHECK26-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
24760 // CHECK26-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
24761 // CHECK26-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
24762 // CHECK26-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
24763 // CHECK26-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
24764 // CHECK26-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
24765 // CHECK26-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
24766 // CHECK26-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
24767 // CHECK26-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
24768 // CHECK26-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
24769 // CHECK26-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
24770 // CHECK26-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
24771 // CHECK26-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
24772 // CHECK26-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
24773 // CHECK26-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
24774 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24775 // CHECK26:       omp.inner.for.cond:
24776 // CHECK26-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
24777 // CHECK26-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
24778 // CHECK26-NEXT:    [[CMP:%.*]] = icmp ule i64 [[TMP6]], [[TMP7]]
24779 // CHECK26-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24780 // CHECK26:       omp.inner.for.body:
24781 // CHECK26-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
24782 // CHECK26-NEXT:    [[MUL:%.*]] = mul i64 [[TMP8]], 400
24783 // CHECK26-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
24784 // CHECK26-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
24785 // CHECK26-NEXT:    [[TMP9:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
24786 // CHECK26-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP9]] to double
24787 // CHECK26-NEXT:    [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
24788 // CHECK26-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
24789 // CHECK26-NEXT:    store double [[ADD2]], double* [[A]], align 8, !llvm.access.group !18
24790 // CHECK26-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
24791 // CHECK26-NEXT:    [[TMP10:%.*]] = load double, double* [[A3]], align 8, !llvm.access.group !18
24792 // CHECK26-NEXT:    [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00
24793 // CHECK26-NEXT:    store double [[INC]], double* [[A3]], align 8, !llvm.access.group !18
24794 // CHECK26-NEXT:    [[CONV4:%.*]] = fptosi double [[INC]] to i16
24795 // CHECK26-NEXT:    [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]]
24796 // CHECK26-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP11]]
24797 // CHECK26-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
24798 // CHECK26-NEXT:    store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !18
24799 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24800 // CHECK26:       omp.body.continue:
24801 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24802 // CHECK26:       omp.inner.for.inc:
24803 // CHECK26-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
24804 // CHECK26-NEXT:    [[ADD6:%.*]] = add i64 [[TMP12]], 1
24805 // CHECK26-NEXT:    store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
24806 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
24807 // CHECK26:       omp.inner.for.end:
24808 // CHECK26-NEXT:    store i64 400, i64* [[IT]], align 8
24809 // CHECK26-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
24810 // CHECK26-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
24811 // CHECK26-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i64 1
24812 // CHECK26-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2
24813 // CHECK26-NEXT:    [[CONV9:%.*]] = sext i16 [[TMP14]] to i32
24814 // CHECK26-NEXT:    [[TMP15:%.*]] = load i32, i32* [[B]], align 4
24815 // CHECK26-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP15]]
24816 // CHECK26-NEXT:    [[TMP16:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
24817 // CHECK26-NEXT:    call void @llvm.stackrestore(i8* [[TMP16]])
24818 // CHECK26-NEXT:    ret i32 [[ADD10]]
24819 //
24820 //
24821 // CHECK26-LABEL: define {{[^@]+}}@_ZL7fstatici
24822 // CHECK26-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
24823 // CHECK26-NEXT:  entry:
24824 // CHECK26-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24825 // CHECK26-NEXT:    [[A:%.*]] = alloca i32, align 4
24826 // CHECK26-NEXT:    [[AA:%.*]] = alloca i16, align 2
24827 // CHECK26-NEXT:    [[AAA:%.*]] = alloca i8, align 1
24828 // CHECK26-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
24829 // CHECK26-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24830 // CHECK26-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24831 // CHECK26-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24832 // CHECK26-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24833 // CHECK26-NEXT:    store i32 0, i32* [[A]], align 4
24834 // CHECK26-NEXT:    store i16 0, i16* [[AA]], align 2
24835 // CHECK26-NEXT:    store i8 0, i8* [[AAA]], align 1
24836 // CHECK26-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24837 // CHECK26-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
24838 // CHECK26-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
24839 // CHECK26-NEXT:    ret i32 [[TMP0]]
24840 //
24841 //
24842 // CHECK26-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
24843 // CHECK26-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
24844 // CHECK26-NEXT:  entry:
24845 // CHECK26-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24846 // CHECK26-NEXT:    [[A:%.*]] = alloca i32, align 4
24847 // CHECK26-NEXT:    [[AA:%.*]] = alloca i16, align 2
24848 // CHECK26-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
24849 // CHECK26-NEXT:    [[TMP:%.*]] = alloca i64, align 8
24850 // CHECK26-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
24851 // CHECK26-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
24852 // CHECK26-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
24853 // CHECK26-NEXT:    [[I:%.*]] = alloca i64, align 8
24854 // CHECK26-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24855 // CHECK26-NEXT:    store i32 0, i32* [[A]], align 4
24856 // CHECK26-NEXT:    store i16 0, i16* [[AA]], align 2
24857 // CHECK26-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
24858 // CHECK26-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
24859 // CHECK26-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
24860 // CHECK26-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
24861 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24862 // CHECK26:       omp.inner.for.cond:
24863 // CHECK26-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
24864 // CHECK26-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !21
24865 // CHECK26-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
24866 // CHECK26-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24867 // CHECK26:       omp.inner.for.body:
24868 // CHECK26-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
24869 // CHECK26-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
24870 // CHECK26-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
24871 // CHECK26-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !21
24872 // CHECK26-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !21
24873 // CHECK26-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
24874 // CHECK26-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !21
24875 // CHECK26-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !21
24876 // CHECK26-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
24877 // CHECK26-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
24878 // CHECK26-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
24879 // CHECK26-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !21
24880 // CHECK26-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
24881 // CHECK26-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
24882 // CHECK26-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
24883 // CHECK26-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
24884 // CHECK26-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24885 // CHECK26:       omp.body.continue:
24886 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24887 // CHECK26:       omp.inner.for.inc:
24888 // CHECK26-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
24889 // CHECK26-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
24890 // CHECK26-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !21
24891 // CHECK26-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
24892 // CHECK26:       omp.inner.for.end:
24893 // CHECK26-NEXT:    store i64 11, i64* [[I]], align 8
24894 // CHECK26-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
24895 // CHECK26-NEXT:    ret i32 [[TMP8]]
24896 //
24897 //
24898 // CHECK27-LABEL: define {{[^@]+}}@_Z7get_valv
24899 // CHECK27-SAME: () #[[ATTR0:[0-9]+]] {
24900 // CHECK27-NEXT:  entry:
24901 // CHECK27-NEXT:    ret i64 0
24902 //
24903 //
24904 // CHECK27-LABEL: define {{[^@]+}}@_Z3fooi
24905 // CHECK27-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
24906 // CHECK27-NEXT:  entry:
24907 // CHECK27-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
24908 // CHECK27-NEXT:    [[A:%.*]] = alloca i32, align 4
24909 // CHECK27-NEXT:    [[AA:%.*]] = alloca i16, align 2
24910 // CHECK27-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
24911 // CHECK27-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
24912 // CHECK27-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
24913 // CHECK27-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
24914 // CHECK27-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
24915 // CHECK27-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
24916 // CHECK27-NEXT:    [[TMP:%.*]] = alloca i32, align 4
24917 // CHECK27-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
24918 // CHECK27-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
24919 // CHECK27-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
24920 // CHECK27-NEXT:    [[I:%.*]] = alloca i32, align 4
24921 // CHECK27-NEXT:    [[K:%.*]] = alloca i64, align 8
24922 // CHECK27-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
24923 // CHECK27-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
24924 // CHECK27-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
24925 // CHECK27-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
24926 // CHECK27-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
24927 // CHECK27-NEXT:    [[I7:%.*]] = alloca i32, align 4
24928 // CHECK27-NEXT:    [[K8:%.*]] = alloca i64, align 8
24929 // CHECK27-NEXT:    [[LIN:%.*]] = alloca i32, align 4
24930 // CHECK27-NEXT:    [[_TMP20:%.*]] = alloca i64, align 4
24931 // CHECK27-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
24932 // CHECK27-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
24933 // CHECK27-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
24934 // CHECK27-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
24935 // CHECK27-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
24936 // CHECK27-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
24937 // CHECK27-NEXT:    [[IT:%.*]] = alloca i64, align 8
24938 // CHECK27-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
24939 // CHECK27-NEXT:    [[A28:%.*]] = alloca i32, align 4
24940 // CHECK27-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
24941 // CHECK27-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
24942 // CHECK27-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
24943 // CHECK27-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
24944 // CHECK27-NEXT:    [[IT53:%.*]] = alloca i16, align 2
24945 // CHECK27-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
24946 // CHECK27-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
24947 // CHECK27-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
24948 // CHECK27-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
24949 // CHECK27-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
24950 // CHECK27-NEXT:    [[IT72:%.*]] = alloca i8, align 1
24951 // CHECK27-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
24952 // CHECK27-NEXT:    store i32 0, i32* [[A]], align 4
24953 // CHECK27-NEXT:    store i16 0, i16* [[AA]], align 2
24954 // CHECK27-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
24955 // CHECK27-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
24956 // CHECK27-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
24957 // CHECK27-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
24958 // CHECK27-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
24959 // CHECK27-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
24960 // CHECK27-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
24961 // CHECK27-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
24962 // CHECK27-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
24963 // CHECK27-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
24964 // CHECK27-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
24965 // CHECK27-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
24966 // CHECK27-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
24967 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
24968 // CHECK27:       omp.inner.for.cond:
24969 // CHECK27-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
24970 // CHECK27-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
24971 // CHECK27-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
24972 // CHECK27-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
24973 // CHECK27:       omp.inner.for.body:
24974 // CHECK27-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
24975 // CHECK27-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
24976 // CHECK27-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
24977 // CHECK27-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
24978 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
24979 // CHECK27:       omp.body.continue:
24980 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
24981 // CHECK27:       omp.inner.for.inc:
24982 // CHECK27-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
24983 // CHECK27-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
24984 // CHECK27-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
24985 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
24986 // CHECK27:       omp.inner.for.end:
24987 // CHECK27-NEXT:    store i32 33, i32* [[I]], align 4
24988 // CHECK27-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
24989 // CHECK27-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
24990 // CHECK27-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
24991 // CHECK27-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
24992 // CHECK27-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
24993 // CHECK27-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4
24994 // CHECK27-NEXT:    [[TMP10:%.*]] = load i64, i64* [[K]], align 8
24995 // CHECK27-NEXT:    store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8
24996 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
24997 // CHECK27:       omp.inner.for.cond9:
24998 // CHECK27-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
24999 // CHECK27-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7
25000 // CHECK27-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
25001 // CHECK27-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
25002 // CHECK27:       omp.inner.for.body11:
25003 // CHECK27-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
25004 // CHECK27-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1
25005 // CHECK27-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
25006 // CHECK27-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7
25007 // CHECK27-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7
25008 // CHECK27-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
25009 // CHECK27-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3
25010 // CHECK27-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
25011 // CHECK27-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]]
25012 // CHECK27-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7
25013 // CHECK27-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7
25014 // CHECK27-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
25015 // CHECK27-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7
25016 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
25017 // CHECK27:       omp.body.continue16:
25018 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
25019 // CHECK27:       omp.inner.for.inc17:
25020 // CHECK27-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
25021 // CHECK27-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
25022 // CHECK27-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
25023 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]]
25024 // CHECK27:       omp.inner.for.end19:
25025 // CHECK27-NEXT:    store i32 1, i32* [[I7]], align 4
25026 // CHECK27-NEXT:    [[TMP18:%.*]] = load i64, i64* [[K8]], align 8
25027 // CHECK27-NEXT:    store i64 [[TMP18]], i64* [[K]], align 8
25028 // CHECK27-NEXT:    store i32 12, i32* [[LIN]], align 4
25029 // CHECK27-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
25030 // CHECK27-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
25031 // CHECK27-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
25032 // CHECK27-NEXT:    store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8
25033 // CHECK27-NEXT:    [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4
25034 // CHECK27-NEXT:    store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4
25035 // CHECK27-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
25036 // CHECK27-NEXT:    store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4
25037 // CHECK27-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
25038 // CHECK27-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
25039 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
25040 // CHECK27:       omp.inner.for.cond29:
25041 // CHECK27-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
25042 // CHECK27-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10
25043 // CHECK27-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
25044 // CHECK27-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
25045 // CHECK27:       omp.inner.for.body31:
25046 // CHECK27-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
25047 // CHECK27-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP24]], 400
25048 // CHECK27-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
25049 // CHECK27-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10
25050 // CHECK27-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10
25051 // CHECK27-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP25]] to i64
25052 // CHECK27-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
25053 // CHECK27-NEXT:    [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
25054 // CHECK27-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]]
25055 // CHECK27-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
25056 // CHECK27-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
25057 // CHECK27-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10
25058 // CHECK27-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10
25059 // CHECK27-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP28]] to i64
25060 // CHECK27-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
25061 // CHECK27-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
25062 // CHECK27-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]]
25063 // CHECK27-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
25064 // CHECK27-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
25065 // CHECK27-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10
25066 // CHECK27-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
25067 // CHECK27-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP31]] to i32
25068 // CHECK27-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
25069 // CHECK27-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
25070 // CHECK27-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10
25071 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
25072 // CHECK27:       omp.body.continue45:
25073 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
25074 // CHECK27:       omp.inner.for.inc46:
25075 // CHECK27-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
25076 // CHECK27-NEXT:    [[ADD47:%.*]] = add i64 [[TMP32]], 1
25077 // CHECK27-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
25078 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]]
25079 // CHECK27:       omp.inner.for.end48:
25080 // CHECK27-NEXT:    store i64 400, i64* [[IT]], align 8
25081 // CHECK27-NEXT:    [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4
25082 // CHECK27-NEXT:    store i32 [[TMP33]], i32* [[LIN]], align 4
25083 // CHECK27-NEXT:    [[TMP34:%.*]] = load i32, i32* [[A28]], align 4
25084 // CHECK27-NEXT:    store i32 [[TMP34]], i32* [[A]], align 4
25085 // CHECK27-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
25086 // CHECK27-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
25087 // CHECK27-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
25088 // CHECK27-NEXT:    store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4
25089 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
25090 // CHECK27:       omp.inner.for.cond54:
25091 // CHECK27-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
25092 // CHECK27-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13
25093 // CHECK27-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]]
25094 // CHECK27-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
25095 // CHECK27:       omp.inner.for.body56:
25096 // CHECK27-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
25097 // CHECK27-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4
25098 // CHECK27-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
25099 // CHECK27-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
25100 // CHECK27-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13
25101 // CHECK27-NEXT:    [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
25102 // CHECK27-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1
25103 // CHECK27-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13
25104 // CHECK27-NEXT:    [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
25105 // CHECK27-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP40]] to i32
25106 // CHECK27-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
25107 // CHECK27-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
25108 // CHECK27-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13
25109 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
25110 // CHECK27:       omp.body.continue64:
25111 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
25112 // CHECK27:       omp.inner.for.inc65:
25113 // CHECK27-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
25114 // CHECK27-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1
25115 // CHECK27-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
25116 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]]
25117 // CHECK27:       omp.inner.for.end67:
25118 // CHECK27-NEXT:    store i16 22, i16* [[IT53]], align 2
25119 // CHECK27-NEXT:    [[TMP42:%.*]] = load i32, i32* [[A]], align 4
25120 // CHECK27-NEXT:    store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4
25121 // CHECK27-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
25122 // CHECK27-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
25123 // CHECK27-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
25124 // CHECK27-NEXT:    store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4
25125 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
25126 // CHECK27:       omp.inner.for.cond73:
25127 // CHECK27-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
25128 // CHECK27-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16
25129 // CHECK27-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]]
25130 // CHECK27-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
25131 // CHECK27:       omp.inner.for.body75:
25132 // CHECK27-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
25133 // CHECK27-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1
25134 // CHECK27-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
25135 // CHECK27-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
25136 // CHECK27-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16
25137 // CHECK27-NEXT:    [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
25138 // CHECK27-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1
25139 // CHECK27-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16
25140 // CHECK27-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
25141 // CHECK27-NEXT:    [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
25142 // CHECK27-NEXT:    [[CONV80:%.*]] = fpext float [[TMP48]] to double
25143 // CHECK27-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
25144 // CHECK27-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
25145 // CHECK27-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
25146 // CHECK27-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
25147 // CHECK27-NEXT:    [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
25148 // CHECK27-NEXT:    [[CONV84:%.*]] = fpext float [[TMP49]] to double
25149 // CHECK27-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
25150 // CHECK27-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
25151 // CHECK27-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
25152 // CHECK27-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
25153 // CHECK27-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2
25154 // CHECK27-NEXT:    [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
25155 // CHECK27-NEXT:    [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00
25156 // CHECK27-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
25157 // CHECK27-NEXT:    [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]]
25158 // CHECK27-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]]
25159 // CHECK27-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3
25160 // CHECK27-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
25161 // CHECK27-NEXT:    [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00
25162 // CHECK27-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
25163 // CHECK27-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
25164 // CHECK27-NEXT:    [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
25165 // CHECK27-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1
25166 // CHECK27-NEXT:    store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16
25167 // CHECK27-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
25168 // CHECK27-NEXT:    [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
25169 // CHECK27-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP54]] to i32
25170 // CHECK27-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
25171 // CHECK27-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
25172 // CHECK27-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16
25173 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
25174 // CHECK27:       omp.body.continue97:
25175 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
25176 // CHECK27:       omp.inner.for.inc98:
25177 // CHECK27-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
25178 // CHECK27-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1
25179 // CHECK27-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
25180 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]]
25181 // CHECK27:       omp.inner.for.end100:
25182 // CHECK27-NEXT:    store i8 96, i8* [[IT72]], align 1
25183 // CHECK27-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A]], align 4
25184 // CHECK27-NEXT:    [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
25185 // CHECK27-NEXT:    call void @llvm.stackrestore(i8* [[TMP57]])
25186 // CHECK27-NEXT:    ret i32 [[TMP56]]
25187 //
25188 //
25189 // CHECK27-LABEL: define {{[^@]+}}@_Z3bari
25190 // CHECK27-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
25191 // CHECK27-NEXT:  entry:
25192 // CHECK27-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25193 // CHECK27-NEXT:    [[A:%.*]] = alloca i32, align 4
25194 // CHECK27-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
25195 // CHECK27-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25196 // CHECK27-NEXT:    store i32 0, i32* [[A]], align 4
25197 // CHECK27-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
25198 // CHECK27-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
25199 // CHECK27-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
25200 // CHECK27-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
25201 // CHECK27-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
25202 // CHECK27-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25203 // CHECK27-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]])
25204 // CHECK27-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
25205 // CHECK27-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
25206 // CHECK27-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
25207 // CHECK27-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
25208 // CHECK27-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
25209 // CHECK27-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
25210 // CHECK27-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
25211 // CHECK27-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
25212 // CHECK27-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
25213 // CHECK27-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
25214 // CHECK27-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
25215 // CHECK27-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
25216 // CHECK27-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
25217 // CHECK27-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
25218 // CHECK27-NEXT:    ret i32 [[TMP8]]
25219 //
25220 //
25221 // CHECK27-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
25222 // CHECK27-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
25223 // CHECK27-NEXT:  entry:
25224 // CHECK27-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
25225 // CHECK27-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25226 // CHECK27-NEXT:    [[B:%.*]] = alloca i32, align 4
25227 // CHECK27-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
25228 // CHECK27-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
25229 // CHECK27-NEXT:    [[TMP:%.*]] = alloca i64, align 4
25230 // CHECK27-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
25231 // CHECK27-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
25232 // CHECK27-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
25233 // CHECK27-NEXT:    [[IT:%.*]] = alloca i64, align 8
25234 // CHECK27-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
25235 // CHECK27-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25236 // CHECK27-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
25237 // CHECK27-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
25238 // CHECK27-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
25239 // CHECK27-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
25240 // CHECK27-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
25241 // CHECK27-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
25242 // CHECK27-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
25243 // CHECK27-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
25244 // CHECK27-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
25245 // CHECK27-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
25246 // CHECK27-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
25247 // CHECK27-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
25248 // CHECK27-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
25249 // CHECK27-NEXT:    store i64 [[TMP4]], i64* [[DOTOMP_IV]], align 8
25250 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25251 // CHECK27:       omp.inner.for.cond:
25252 // CHECK27-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
25253 // CHECK27-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19
25254 // CHECK27-NEXT:    [[CMP:%.*]] = icmp ule i64 [[TMP5]], [[TMP6]]
25255 // CHECK27-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25256 // CHECK27:       omp.inner.for.body:
25257 // CHECK27-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
25258 // CHECK27-NEXT:    [[MUL:%.*]] = mul i64 [[TMP7]], 400
25259 // CHECK27-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
25260 // CHECK27-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19
25261 // CHECK27-NEXT:    [[TMP8:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
25262 // CHECK27-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP8]] to double
25263 // CHECK27-NEXT:    [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
25264 // CHECK27-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
25265 // CHECK27-NEXT:    store double [[ADD2]], double* [[A]], align 4, !llvm.access.group !19
25266 // CHECK27-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
25267 // CHECK27-NEXT:    [[TMP9:%.*]] = load double, double* [[A3]], align 4, !llvm.access.group !19
25268 // CHECK27-NEXT:    [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00
25269 // CHECK27-NEXT:    store double [[INC]], double* [[A3]], align 4, !llvm.access.group !19
25270 // CHECK27-NEXT:    [[CONV4:%.*]] = fptosi double [[INC]] to i16
25271 // CHECK27-NEXT:    [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]]
25272 // CHECK27-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP10]]
25273 // CHECK27-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
25274 // CHECK27-NEXT:    store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !19
25275 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25276 // CHECK27:       omp.body.continue:
25277 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25278 // CHECK27:       omp.inner.for.inc:
25279 // CHECK27-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
25280 // CHECK27-NEXT:    [[ADD6:%.*]] = add i64 [[TMP11]], 1
25281 // CHECK27-NEXT:    store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
25282 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
25283 // CHECK27:       omp.inner.for.end:
25284 // CHECK27-NEXT:    store i64 400, i64* [[IT]], align 8
25285 // CHECK27-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
25286 // CHECK27-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
25287 // CHECK27-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i32 1
25288 // CHECK27-NEXT:    [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2
25289 // CHECK27-NEXT:    [[CONV9:%.*]] = sext i16 [[TMP13]] to i32
25290 // CHECK27-NEXT:    [[TMP14:%.*]] = load i32, i32* [[B]], align 4
25291 // CHECK27-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP14]]
25292 // CHECK27-NEXT:    [[TMP15:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
25293 // CHECK27-NEXT:    call void @llvm.stackrestore(i8* [[TMP15]])
25294 // CHECK27-NEXT:    ret i32 [[ADD10]]
25295 //
25296 //
25297 // CHECK27-LABEL: define {{[^@]+}}@_ZL7fstatici
25298 // CHECK27-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
25299 // CHECK27-NEXT:  entry:
25300 // CHECK27-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25301 // CHECK27-NEXT:    [[A:%.*]] = alloca i32, align 4
25302 // CHECK27-NEXT:    [[AA:%.*]] = alloca i16, align 2
25303 // CHECK27-NEXT:    [[AAA:%.*]] = alloca i8, align 1
25304 // CHECK27-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
25305 // CHECK27-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25306 // CHECK27-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25307 // CHECK27-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25308 // CHECK27-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25309 // CHECK27-NEXT:    store i32 0, i32* [[A]], align 4
25310 // CHECK27-NEXT:    store i16 0, i16* [[AA]], align 2
25311 // CHECK27-NEXT:    store i8 0, i8* [[AAA]], align 1
25312 // CHECK27-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25313 // CHECK27-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
25314 // CHECK27-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
25315 // CHECK27-NEXT:    ret i32 [[TMP0]]
25316 //
25317 //
25318 // CHECK27-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
25319 // CHECK27-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
25320 // CHECK27-NEXT:  entry:
25321 // CHECK27-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25322 // CHECK27-NEXT:    [[A:%.*]] = alloca i32, align 4
25323 // CHECK27-NEXT:    [[AA:%.*]] = alloca i16, align 2
25324 // CHECK27-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
25325 // CHECK27-NEXT:    [[TMP:%.*]] = alloca i64, align 4
25326 // CHECK27-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
25327 // CHECK27-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
25328 // CHECK27-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
25329 // CHECK27-NEXT:    [[I:%.*]] = alloca i64, align 8
25330 // CHECK27-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25331 // CHECK27-NEXT:    store i32 0, i32* [[A]], align 4
25332 // CHECK27-NEXT:    store i16 0, i16* [[AA]], align 2
25333 // CHECK27-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
25334 // CHECK27-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
25335 // CHECK27-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
25336 // CHECK27-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
25337 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25338 // CHECK27:       omp.inner.for.cond:
25339 // CHECK27-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
25340 // CHECK27-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !22
25341 // CHECK27-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
25342 // CHECK27-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25343 // CHECK27:       omp.inner.for.body:
25344 // CHECK27-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
25345 // CHECK27-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
25346 // CHECK27-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
25347 // CHECK27-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !22
25348 // CHECK27-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !22
25349 // CHECK27-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
25350 // CHECK27-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !22
25351 // CHECK27-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !22
25352 // CHECK27-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
25353 // CHECK27-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
25354 // CHECK27-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
25355 // CHECK27-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !22
25356 // CHECK27-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
25357 // CHECK27-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
25358 // CHECK27-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
25359 // CHECK27-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
25360 // CHECK27-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25361 // CHECK27:       omp.body.continue:
25362 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25363 // CHECK27:       omp.inner.for.inc:
25364 // CHECK27-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
25365 // CHECK27-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
25366 // CHECK27-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
25367 // CHECK27-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
25368 // CHECK27:       omp.inner.for.end:
25369 // CHECK27-NEXT:    store i64 11, i64* [[I]], align 8
25370 // CHECK27-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
25371 // CHECK27-NEXT:    ret i32 [[TMP8]]
25372 //
25373 //
25374 // CHECK28-LABEL: define {{[^@]+}}@_Z7get_valv
25375 // CHECK28-SAME: () #[[ATTR0:[0-9]+]] {
25376 // CHECK28-NEXT:  entry:
25377 // CHECK28-NEXT:    ret i64 0
25378 //
25379 //
25380 // CHECK28-LABEL: define {{[^@]+}}@_Z3fooi
25381 // CHECK28-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
25382 // CHECK28-NEXT:  entry:
25383 // CHECK28-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25384 // CHECK28-NEXT:    [[A:%.*]] = alloca i32, align 4
25385 // CHECK28-NEXT:    [[AA:%.*]] = alloca i16, align 2
25386 // CHECK28-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
25387 // CHECK28-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
25388 // CHECK28-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
25389 // CHECK28-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
25390 // CHECK28-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
25391 // CHECK28-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
25392 // CHECK28-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25393 // CHECK28-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25394 // CHECK28-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25395 // CHECK28-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25396 // CHECK28-NEXT:    [[I:%.*]] = alloca i32, align 4
25397 // CHECK28-NEXT:    [[K:%.*]] = alloca i64, align 8
25398 // CHECK28-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
25399 // CHECK28-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
25400 // CHECK28-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
25401 // CHECK28-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
25402 // CHECK28-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
25403 // CHECK28-NEXT:    [[I7:%.*]] = alloca i32, align 4
25404 // CHECK28-NEXT:    [[K8:%.*]] = alloca i64, align 8
25405 // CHECK28-NEXT:    [[LIN:%.*]] = alloca i32, align 4
25406 // CHECK28-NEXT:    [[_TMP20:%.*]] = alloca i64, align 4
25407 // CHECK28-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
25408 // CHECK28-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
25409 // CHECK28-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
25410 // CHECK28-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
25411 // CHECK28-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
25412 // CHECK28-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
25413 // CHECK28-NEXT:    [[IT:%.*]] = alloca i64, align 8
25414 // CHECK28-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
25415 // CHECK28-NEXT:    [[A28:%.*]] = alloca i32, align 4
25416 // CHECK28-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
25417 // CHECK28-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
25418 // CHECK28-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
25419 // CHECK28-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
25420 // CHECK28-NEXT:    [[IT53:%.*]] = alloca i16, align 2
25421 // CHECK28-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25422 // CHECK28-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
25423 // CHECK28-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
25424 // CHECK28-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
25425 // CHECK28-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
25426 // CHECK28-NEXT:    [[IT72:%.*]] = alloca i8, align 1
25427 // CHECK28-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25428 // CHECK28-NEXT:    store i32 0, i32* [[A]], align 4
25429 // CHECK28-NEXT:    store i16 0, i16* [[AA]], align 2
25430 // CHECK28-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
25431 // CHECK28-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
25432 // CHECK28-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
25433 // CHECK28-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
25434 // CHECK28-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
25435 // CHECK28-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25436 // CHECK28-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
25437 // CHECK28-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
25438 // CHECK28-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
25439 // CHECK28-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25440 // CHECK28-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
25441 // CHECK28-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25442 // CHECK28-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
25443 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25444 // CHECK28:       omp.inner.for.cond:
25445 // CHECK28-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
25446 // CHECK28-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
25447 // CHECK28-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
25448 // CHECK28-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25449 // CHECK28:       omp.inner.for.body:
25450 // CHECK28-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
25451 // CHECK28-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
25452 // CHECK28-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
25453 // CHECK28-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
25454 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25455 // CHECK28:       omp.body.continue:
25456 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25457 // CHECK28:       omp.inner.for.inc:
25458 // CHECK28-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
25459 // CHECK28-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
25460 // CHECK28-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
25461 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
25462 // CHECK28:       omp.inner.for.end:
25463 // CHECK28-NEXT:    store i32 33, i32* [[I]], align 4
25464 // CHECK28-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
25465 // CHECK28-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
25466 // CHECK28-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
25467 // CHECK28-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
25468 // CHECK28-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
25469 // CHECK28-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4
25470 // CHECK28-NEXT:    [[TMP10:%.*]] = load i64, i64* [[K]], align 8
25471 // CHECK28-NEXT:    store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8
25472 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
25473 // CHECK28:       omp.inner.for.cond9:
25474 // CHECK28-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
25475 // CHECK28-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7
25476 // CHECK28-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
25477 // CHECK28-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
25478 // CHECK28:       omp.inner.for.body11:
25479 // CHECK28-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
25480 // CHECK28-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1
25481 // CHECK28-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
25482 // CHECK28-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7
25483 // CHECK28-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7
25484 // CHECK28-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
25485 // CHECK28-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3
25486 // CHECK28-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
25487 // CHECK28-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]]
25488 // CHECK28-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7
25489 // CHECK28-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7
25490 // CHECK28-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
25491 // CHECK28-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7
25492 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
25493 // CHECK28:       omp.body.continue16:
25494 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
25495 // CHECK28:       omp.inner.for.inc17:
25496 // CHECK28-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
25497 // CHECK28-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
25498 // CHECK28-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
25499 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]]
25500 // CHECK28:       omp.inner.for.end19:
25501 // CHECK28-NEXT:    store i32 1, i32* [[I7]], align 4
25502 // CHECK28-NEXT:    [[TMP18:%.*]] = load i64, i64* [[K8]], align 8
25503 // CHECK28-NEXT:    store i64 [[TMP18]], i64* [[K]], align 8
25504 // CHECK28-NEXT:    store i32 12, i32* [[LIN]], align 4
25505 // CHECK28-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
25506 // CHECK28-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
25507 // CHECK28-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
25508 // CHECK28-NEXT:    store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8
25509 // CHECK28-NEXT:    [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4
25510 // CHECK28-NEXT:    store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4
25511 // CHECK28-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
25512 // CHECK28-NEXT:    store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4
25513 // CHECK28-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
25514 // CHECK28-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
25515 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
25516 // CHECK28:       omp.inner.for.cond29:
25517 // CHECK28-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
25518 // CHECK28-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10
25519 // CHECK28-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
25520 // CHECK28-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
25521 // CHECK28:       omp.inner.for.body31:
25522 // CHECK28-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
25523 // CHECK28-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP24]], 400
25524 // CHECK28-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
25525 // CHECK28-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10
25526 // CHECK28-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10
25527 // CHECK28-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP25]] to i64
25528 // CHECK28-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
25529 // CHECK28-NEXT:    [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
25530 // CHECK28-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]]
25531 // CHECK28-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
25532 // CHECK28-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
25533 // CHECK28-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10
25534 // CHECK28-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10
25535 // CHECK28-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP28]] to i64
25536 // CHECK28-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
25537 // CHECK28-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
25538 // CHECK28-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]]
25539 // CHECK28-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
25540 // CHECK28-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
25541 // CHECK28-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10
25542 // CHECK28-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
25543 // CHECK28-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP31]] to i32
25544 // CHECK28-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
25545 // CHECK28-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
25546 // CHECK28-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10
25547 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
25548 // CHECK28:       omp.body.continue45:
25549 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
25550 // CHECK28:       omp.inner.for.inc46:
25551 // CHECK28-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
25552 // CHECK28-NEXT:    [[ADD47:%.*]] = add i64 [[TMP32]], 1
25553 // CHECK28-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
25554 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]]
25555 // CHECK28:       omp.inner.for.end48:
25556 // CHECK28-NEXT:    store i64 400, i64* [[IT]], align 8
25557 // CHECK28-NEXT:    [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4
25558 // CHECK28-NEXT:    store i32 [[TMP33]], i32* [[LIN]], align 4
25559 // CHECK28-NEXT:    [[TMP34:%.*]] = load i32, i32* [[A28]], align 4
25560 // CHECK28-NEXT:    store i32 [[TMP34]], i32* [[A]], align 4
25561 // CHECK28-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
25562 // CHECK28-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
25563 // CHECK28-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
25564 // CHECK28-NEXT:    store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4
25565 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
25566 // CHECK28:       omp.inner.for.cond54:
25567 // CHECK28-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
25568 // CHECK28-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13
25569 // CHECK28-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]]
25570 // CHECK28-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
25571 // CHECK28:       omp.inner.for.body56:
25572 // CHECK28-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
25573 // CHECK28-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4
25574 // CHECK28-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
25575 // CHECK28-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
25576 // CHECK28-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13
25577 // CHECK28-NEXT:    [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
25578 // CHECK28-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1
25579 // CHECK28-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13
25580 // CHECK28-NEXT:    [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
25581 // CHECK28-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP40]] to i32
25582 // CHECK28-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
25583 // CHECK28-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
25584 // CHECK28-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13
25585 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
25586 // CHECK28:       omp.body.continue64:
25587 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
25588 // CHECK28:       omp.inner.for.inc65:
25589 // CHECK28-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
25590 // CHECK28-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1
25591 // CHECK28-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
25592 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]]
25593 // CHECK28:       omp.inner.for.end67:
25594 // CHECK28-NEXT:    store i16 22, i16* [[IT53]], align 2
25595 // CHECK28-NEXT:    [[TMP42:%.*]] = load i32, i32* [[A]], align 4
25596 // CHECK28-NEXT:    store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4
25597 // CHECK28-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
25598 // CHECK28-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
25599 // CHECK28-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
25600 // CHECK28-NEXT:    store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4
25601 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
25602 // CHECK28:       omp.inner.for.cond73:
25603 // CHECK28-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
25604 // CHECK28-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16
25605 // CHECK28-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]]
25606 // CHECK28-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
25607 // CHECK28:       omp.inner.for.body75:
25608 // CHECK28-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
25609 // CHECK28-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1
25610 // CHECK28-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
25611 // CHECK28-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
25612 // CHECK28-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16
25613 // CHECK28-NEXT:    [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
25614 // CHECK28-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1
25615 // CHECK28-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16
25616 // CHECK28-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
25617 // CHECK28-NEXT:    [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
25618 // CHECK28-NEXT:    [[CONV80:%.*]] = fpext float [[TMP48]] to double
25619 // CHECK28-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
25620 // CHECK28-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
25621 // CHECK28-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
25622 // CHECK28-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
25623 // CHECK28-NEXT:    [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
25624 // CHECK28-NEXT:    [[CONV84:%.*]] = fpext float [[TMP49]] to double
25625 // CHECK28-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
25626 // CHECK28-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
25627 // CHECK28-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
25628 // CHECK28-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
25629 // CHECK28-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2
25630 // CHECK28-NEXT:    [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
25631 // CHECK28-NEXT:    [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00
25632 // CHECK28-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
25633 // CHECK28-NEXT:    [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]]
25634 // CHECK28-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]]
25635 // CHECK28-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3
25636 // CHECK28-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
25637 // CHECK28-NEXT:    [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00
25638 // CHECK28-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
25639 // CHECK28-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
25640 // CHECK28-NEXT:    [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
25641 // CHECK28-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1
25642 // CHECK28-NEXT:    store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16
25643 // CHECK28-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
25644 // CHECK28-NEXT:    [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
25645 // CHECK28-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP54]] to i32
25646 // CHECK28-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
25647 // CHECK28-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
25648 // CHECK28-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16
25649 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
25650 // CHECK28:       omp.body.continue97:
25651 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
25652 // CHECK28:       omp.inner.for.inc98:
25653 // CHECK28-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
25654 // CHECK28-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1
25655 // CHECK28-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
25656 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]]
25657 // CHECK28:       omp.inner.for.end100:
25658 // CHECK28-NEXT:    store i8 96, i8* [[IT72]], align 1
25659 // CHECK28-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A]], align 4
25660 // CHECK28-NEXT:    [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
25661 // CHECK28-NEXT:    call void @llvm.stackrestore(i8* [[TMP57]])
25662 // CHECK28-NEXT:    ret i32 [[TMP56]]
25663 //
25664 //
25665 // CHECK28-LABEL: define {{[^@]+}}@_Z3bari
25666 // CHECK28-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
25667 // CHECK28-NEXT:  entry:
25668 // CHECK28-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25669 // CHECK28-NEXT:    [[A:%.*]] = alloca i32, align 4
25670 // CHECK28-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
25671 // CHECK28-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25672 // CHECK28-NEXT:    store i32 0, i32* [[A]], align 4
25673 // CHECK28-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
25674 // CHECK28-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
25675 // CHECK28-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
25676 // CHECK28-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
25677 // CHECK28-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
25678 // CHECK28-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
25679 // CHECK28-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]])
25680 // CHECK28-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
25681 // CHECK28-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
25682 // CHECK28-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
25683 // CHECK28-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
25684 // CHECK28-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
25685 // CHECK28-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
25686 // CHECK28-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
25687 // CHECK28-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
25688 // CHECK28-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
25689 // CHECK28-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
25690 // CHECK28-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
25691 // CHECK28-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
25692 // CHECK28-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
25693 // CHECK28-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
25694 // CHECK28-NEXT:    ret i32 [[TMP8]]
25695 //
25696 //
25697 // CHECK28-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
25698 // CHECK28-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
25699 // CHECK28-NEXT:  entry:
25700 // CHECK28-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
25701 // CHECK28-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25702 // CHECK28-NEXT:    [[B:%.*]] = alloca i32, align 4
25703 // CHECK28-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
25704 // CHECK28-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
25705 // CHECK28-NEXT:    [[TMP:%.*]] = alloca i64, align 4
25706 // CHECK28-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
25707 // CHECK28-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
25708 // CHECK28-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
25709 // CHECK28-NEXT:    [[IT:%.*]] = alloca i64, align 8
25710 // CHECK28-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
25711 // CHECK28-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25712 // CHECK28-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
25713 // CHECK28-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
25714 // CHECK28-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
25715 // CHECK28-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
25716 // CHECK28-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
25717 // CHECK28-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
25718 // CHECK28-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
25719 // CHECK28-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
25720 // CHECK28-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
25721 // CHECK28-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
25722 // CHECK28-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
25723 // CHECK28-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
25724 // CHECK28-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
25725 // CHECK28-NEXT:    store i64 [[TMP4]], i64* [[DOTOMP_IV]], align 8
25726 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25727 // CHECK28:       omp.inner.for.cond:
25728 // CHECK28-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
25729 // CHECK28-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19
25730 // CHECK28-NEXT:    [[CMP:%.*]] = icmp ule i64 [[TMP5]], [[TMP6]]
25731 // CHECK28-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25732 // CHECK28:       omp.inner.for.body:
25733 // CHECK28-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
25734 // CHECK28-NEXT:    [[MUL:%.*]] = mul i64 [[TMP7]], 400
25735 // CHECK28-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
25736 // CHECK28-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19
25737 // CHECK28-NEXT:    [[TMP8:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
25738 // CHECK28-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP8]] to double
25739 // CHECK28-NEXT:    [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
25740 // CHECK28-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
25741 // CHECK28-NEXT:    store double [[ADD2]], double* [[A]], align 4, !llvm.access.group !19
25742 // CHECK28-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
25743 // CHECK28-NEXT:    [[TMP9:%.*]] = load double, double* [[A3]], align 4, !llvm.access.group !19
25744 // CHECK28-NEXT:    [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00
25745 // CHECK28-NEXT:    store double [[INC]], double* [[A3]], align 4, !llvm.access.group !19
25746 // CHECK28-NEXT:    [[CONV4:%.*]] = fptosi double [[INC]] to i16
25747 // CHECK28-NEXT:    [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]]
25748 // CHECK28-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP10]]
25749 // CHECK28-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
25750 // CHECK28-NEXT:    store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group !19
25751 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25752 // CHECK28:       omp.body.continue:
25753 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25754 // CHECK28:       omp.inner.for.inc:
25755 // CHECK28-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
25756 // CHECK28-NEXT:    [[ADD6:%.*]] = add i64 [[TMP11]], 1
25757 // CHECK28-NEXT:    store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
25758 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
25759 // CHECK28:       omp.inner.for.end:
25760 // CHECK28-NEXT:    store i64 400, i64* [[IT]], align 8
25761 // CHECK28-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
25762 // CHECK28-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
25763 // CHECK28-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i32 1
25764 // CHECK28-NEXT:    [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2
25765 // CHECK28-NEXT:    [[CONV9:%.*]] = sext i16 [[TMP13]] to i32
25766 // CHECK28-NEXT:    [[TMP14:%.*]] = load i32, i32* [[B]], align 4
25767 // CHECK28-NEXT:    [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP14]]
25768 // CHECK28-NEXT:    [[TMP15:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
25769 // CHECK28-NEXT:    call void @llvm.stackrestore(i8* [[TMP15]])
25770 // CHECK28-NEXT:    ret i32 [[ADD10]]
25771 //
25772 //
25773 // CHECK28-LABEL: define {{[^@]+}}@_ZL7fstatici
25774 // CHECK28-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
25775 // CHECK28-NEXT:  entry:
25776 // CHECK28-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25777 // CHECK28-NEXT:    [[A:%.*]] = alloca i32, align 4
25778 // CHECK28-NEXT:    [[AA:%.*]] = alloca i16, align 2
25779 // CHECK28-NEXT:    [[AAA:%.*]] = alloca i8, align 1
25780 // CHECK28-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
25781 // CHECK28-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25782 // CHECK28-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25783 // CHECK28-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25784 // CHECK28-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25785 // CHECK28-NEXT:    store i32 0, i32* [[A]], align 4
25786 // CHECK28-NEXT:    store i16 0, i16* [[AA]], align 2
25787 // CHECK28-NEXT:    store i8 0, i8* [[AAA]], align 1
25788 // CHECK28-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25789 // CHECK28-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
25790 // CHECK28-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
25791 // CHECK28-NEXT:    ret i32 [[TMP0]]
25792 //
25793 //
25794 // CHECK28-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
25795 // CHECK28-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
25796 // CHECK28-NEXT:  entry:
25797 // CHECK28-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25798 // CHECK28-NEXT:    [[A:%.*]] = alloca i32, align 4
25799 // CHECK28-NEXT:    [[AA:%.*]] = alloca i16, align 2
25800 // CHECK28-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
25801 // CHECK28-NEXT:    [[TMP:%.*]] = alloca i64, align 4
25802 // CHECK28-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
25803 // CHECK28-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
25804 // CHECK28-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
25805 // CHECK28-NEXT:    [[I:%.*]] = alloca i64, align 8
25806 // CHECK28-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25807 // CHECK28-NEXT:    store i32 0, i32* [[A]], align 4
25808 // CHECK28-NEXT:    store i16 0, i16* [[AA]], align 2
25809 // CHECK28-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
25810 // CHECK28-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
25811 // CHECK28-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
25812 // CHECK28-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
25813 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25814 // CHECK28:       omp.inner.for.cond:
25815 // CHECK28-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
25816 // CHECK28-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !22
25817 // CHECK28-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
25818 // CHECK28-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25819 // CHECK28:       omp.inner.for.body:
25820 // CHECK28-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
25821 // CHECK28-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
25822 // CHECK28-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
25823 // CHECK28-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !22
25824 // CHECK28-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !22
25825 // CHECK28-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
25826 // CHECK28-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !22
25827 // CHECK28-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !22
25828 // CHECK28-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
25829 // CHECK28-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
25830 // CHECK28-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
25831 // CHECK28-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !22
25832 // CHECK28-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
25833 // CHECK28-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
25834 // CHECK28-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
25835 // CHECK28-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
25836 // CHECK28-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25837 // CHECK28:       omp.body.continue:
25838 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25839 // CHECK28:       omp.inner.for.inc:
25840 // CHECK28-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
25841 // CHECK28-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
25842 // CHECK28-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !22
25843 // CHECK28-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
25844 // CHECK28:       omp.inner.for.end:
25845 // CHECK28-NEXT:    store i64 11, i64* [[I]], align 8
25846 // CHECK28-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
25847 // CHECK28-NEXT:    ret i32 [[TMP8]]
25848 //
25849 //
25850 // CHECK29-LABEL: define {{[^@]+}}@_Z7get_valv
25851 // CHECK29-SAME: () #[[ATTR0:[0-9]+]] {
25852 // CHECK29-NEXT:  entry:
25853 // CHECK29-NEXT:    ret i64 0
25854 //
25855 //
25856 // CHECK29-LABEL: define {{[^@]+}}@_Z3fooi
25857 // CHECK29-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
25858 // CHECK29-NEXT:  entry:
25859 // CHECK29-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
25860 // CHECK29-NEXT:    [[A:%.*]] = alloca i32, align 4
25861 // CHECK29-NEXT:    [[AA:%.*]] = alloca i16, align 2
25862 // CHECK29-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
25863 // CHECK29-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
25864 // CHECK29-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
25865 // CHECK29-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
25866 // CHECK29-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
25867 // CHECK29-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
25868 // CHECK29-NEXT:    [[TMP:%.*]] = alloca i32, align 4
25869 // CHECK29-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
25870 // CHECK29-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
25871 // CHECK29-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
25872 // CHECK29-NEXT:    [[I:%.*]] = alloca i32, align 4
25873 // CHECK29-NEXT:    [[K:%.*]] = alloca i64, align 8
25874 // CHECK29-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
25875 // CHECK29-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
25876 // CHECK29-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
25877 // CHECK29-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
25878 // CHECK29-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
25879 // CHECK29-NEXT:    [[I7:%.*]] = alloca i32, align 4
25880 // CHECK29-NEXT:    [[K8:%.*]] = alloca i64, align 8
25881 // CHECK29-NEXT:    [[LIN:%.*]] = alloca i32, align 4
25882 // CHECK29-NEXT:    [[_TMP20:%.*]] = alloca i64, align 8
25883 // CHECK29-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
25884 // CHECK29-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
25885 // CHECK29-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
25886 // CHECK29-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
25887 // CHECK29-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
25888 // CHECK29-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
25889 // CHECK29-NEXT:    [[IT:%.*]] = alloca i64, align 8
25890 // CHECK29-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
25891 // CHECK29-NEXT:    [[A28:%.*]] = alloca i32, align 4
25892 // CHECK29-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
25893 // CHECK29-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
25894 // CHECK29-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
25895 // CHECK29-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
25896 // CHECK29-NEXT:    [[IT53:%.*]] = alloca i16, align 2
25897 // CHECK29-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
25898 // CHECK29-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
25899 // CHECK29-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
25900 // CHECK29-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
25901 // CHECK29-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
25902 // CHECK29-NEXT:    [[IT72:%.*]] = alloca i8, align 1
25903 // CHECK29-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
25904 // CHECK29-NEXT:    store i32 0, i32* [[A]], align 4
25905 // CHECK29-NEXT:    store i16 0, i16* [[AA]], align 2
25906 // CHECK29-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
25907 // CHECK29-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
25908 // CHECK29-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
25909 // CHECK29-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
25910 // CHECK29-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
25911 // CHECK29-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
25912 // CHECK29-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
25913 // CHECK29-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
25914 // CHECK29-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
25915 // CHECK29-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
25916 // CHECK29-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
25917 // CHECK29-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
25918 // CHECK29-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
25919 // CHECK29-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
25920 // CHECK29-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
25921 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
25922 // CHECK29:       omp.inner.for.cond:
25923 // CHECK29-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
25924 // CHECK29-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
25925 // CHECK29-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
25926 // CHECK29-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
25927 // CHECK29:       omp.inner.for.body:
25928 // CHECK29-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
25929 // CHECK29-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5
25930 // CHECK29-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
25931 // CHECK29-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
25932 // CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
25933 // CHECK29:       omp.body.continue:
25934 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
25935 // CHECK29:       omp.inner.for.inc:
25936 // CHECK29-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
25937 // CHECK29-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
25938 // CHECK29-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
25939 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
25940 // CHECK29:       omp.inner.for.end:
25941 // CHECK29-NEXT:    store i32 33, i32* [[I]], align 4
25942 // CHECK29-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
25943 // CHECK29-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
25944 // CHECK29-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
25945 // CHECK29-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
25946 // CHECK29-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
25947 // CHECK29-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4
25948 // CHECK29-NEXT:    [[TMP12:%.*]] = load i64, i64* [[K]], align 8
25949 // CHECK29-NEXT:    store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8
25950 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
25951 // CHECK29:       omp.inner.for.cond9:
25952 // CHECK29-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
25953 // CHECK29-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6
25954 // CHECK29-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
25955 // CHECK29-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
25956 // CHECK29:       omp.inner.for.body11:
25957 // CHECK29-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
25958 // CHECK29-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
25959 // CHECK29-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
25960 // CHECK29-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6
25961 // CHECK29-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6
25962 // CHECK29-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
25963 // CHECK29-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
25964 // CHECK29-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
25965 // CHECK29-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
25966 // CHECK29-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6
25967 // CHECK29-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6
25968 // CHECK29-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
25969 // CHECK29-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6
25970 // CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
25971 // CHECK29:       omp.body.continue16:
25972 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
25973 // CHECK29:       omp.inner.for.inc17:
25974 // CHECK29-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
25975 // CHECK29-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
25976 // CHECK29-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
25977 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
25978 // CHECK29:       omp.inner.for.end19:
25979 // CHECK29-NEXT:    store i32 1, i32* [[I7]], align 4
25980 // CHECK29-NEXT:    [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
25981 // CHECK29-NEXT:    store i64 [[TMP20]], i64* [[K]], align 8
25982 // CHECK29-NEXT:    store i32 12, i32* [[LIN]], align 4
25983 // CHECK29-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
25984 // CHECK29-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
25985 // CHECK29-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
25986 // CHECK29-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
25987 // CHECK29-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
25988 // CHECK29-NEXT:    store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
25989 // CHECK29-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A]], align 4
25990 // CHECK29-NEXT:    store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
25991 // CHECK29-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
25992 // CHECK29-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
25993 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
25994 // CHECK29:       omp.inner.for.cond29:
25995 // CHECK29-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
25996 // CHECK29-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9
25997 // CHECK29-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
25998 // CHECK29-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
25999 // CHECK29:       omp.inner.for.body31:
26000 // CHECK29-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
26001 // CHECK29-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP26]], 400
26002 // CHECK29-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
26003 // CHECK29-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9
26004 // CHECK29-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9
26005 // CHECK29-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
26006 // CHECK29-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
26007 // CHECK29-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
26008 // CHECK29-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
26009 // CHECK29-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
26010 // CHECK29-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
26011 // CHECK29-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9
26012 // CHECK29-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9
26013 // CHECK29-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
26014 // CHECK29-NEXT:    [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
26015 // CHECK29-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
26016 // CHECK29-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
26017 // CHECK29-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
26018 // CHECK29-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
26019 // CHECK29-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9
26020 // CHECK29-NEXT:    [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
26021 // CHECK29-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
26022 // CHECK29-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
26023 // CHECK29-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
26024 // CHECK29-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9
26025 // CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
26026 // CHECK29:       omp.body.continue45:
26027 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
26028 // CHECK29:       omp.inner.for.inc46:
26029 // CHECK29-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
26030 // CHECK29-NEXT:    [[ADD47:%.*]] = add i64 [[TMP34]], 1
26031 // CHECK29-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
26032 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
26033 // CHECK29:       omp.inner.for.end48:
26034 // CHECK29-NEXT:    store i64 400, i64* [[IT]], align 8
26035 // CHECK29-NEXT:    [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
26036 // CHECK29-NEXT:    store i32 [[TMP35]], i32* [[LIN]], align 4
26037 // CHECK29-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
26038 // CHECK29-NEXT:    store i32 [[TMP36]], i32* [[A]], align 4
26039 // CHECK29-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
26040 // CHECK29-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
26041 // CHECK29-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
26042 // CHECK29-NEXT:    store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
26043 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
26044 // CHECK29:       omp.inner.for.cond54:
26045 // CHECK29-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
26046 // CHECK29-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12
26047 // CHECK29-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
26048 // CHECK29-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
26049 // CHECK29:       omp.inner.for.body56:
26050 // CHECK29-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
26051 // CHECK29-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
26052 // CHECK29-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
26053 // CHECK29-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
26054 // CHECK29-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12
26055 // CHECK29-NEXT:    [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
26056 // CHECK29-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
26057 // CHECK29-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12
26058 // CHECK29-NEXT:    [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
26059 // CHECK29-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
26060 // CHECK29-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
26061 // CHECK29-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
26062 // CHECK29-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12
26063 // CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
26064 // CHECK29:       omp.body.continue64:
26065 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
26066 // CHECK29:       omp.inner.for.inc65:
26067 // CHECK29-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
26068 // CHECK29-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
26069 // CHECK29-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
26070 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
26071 // CHECK29:       omp.inner.for.end67:
26072 // CHECK29-NEXT:    store i16 22, i16* [[IT53]], align 2
26073 // CHECK29-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
26074 // CHECK29-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
26075 // CHECK29-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
26076 // CHECK29-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
26077 // CHECK29-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
26078 // CHECK29-NEXT:    store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
26079 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
26080 // CHECK29:       omp.inner.for.cond73:
26081 // CHECK29-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
26082 // CHECK29-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15
26083 // CHECK29-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
26084 // CHECK29-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
26085 // CHECK29:       omp.inner.for.body75:
26086 // CHECK29-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
26087 // CHECK29-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
26088 // CHECK29-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
26089 // CHECK29-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
26090 // CHECK29-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15
26091 // CHECK29-NEXT:    [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
26092 // CHECK29-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
26093 // CHECK29-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15
26094 // CHECK29-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
26095 // CHECK29-NEXT:    [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
26096 // CHECK29-NEXT:    [[CONV80:%.*]] = fpext float [[TMP50]] to double
26097 // CHECK29-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
26098 // CHECK29-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
26099 // CHECK29-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
26100 // CHECK29-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
26101 // CHECK29-NEXT:    [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
26102 // CHECK29-NEXT:    [[CONV84:%.*]] = fpext float [[TMP51]] to double
26103 // CHECK29-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
26104 // CHECK29-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
26105 // CHECK29-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
26106 // CHECK29-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
26107 // CHECK29-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2
26108 // CHECK29-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
26109 // CHECK29-NEXT:    [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00
26110 // CHECK29-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
26111 // CHECK29-NEXT:    [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]]
26112 // CHECK29-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]]
26113 // CHECK29-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3
26114 // CHECK29-NEXT:    [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
26115 // CHECK29-NEXT:    [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00
26116 // CHECK29-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
26117 // CHECK29-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
26118 // CHECK29-NEXT:    [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
26119 // CHECK29-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1
26120 // CHECK29-NEXT:    store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15
26121 // CHECK29-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
26122 // CHECK29-NEXT:    [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
26123 // CHECK29-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP56]] to i32
26124 // CHECK29-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
26125 // CHECK29-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
26126 // CHECK29-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15
26127 // CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
26128 // CHECK29:       omp.body.continue97:
26129 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
26130 // CHECK29:       omp.inner.for.inc98:
26131 // CHECK29-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
26132 // CHECK29-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1
26133 // CHECK29-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
26134 // CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]]
26135 // CHECK29:       omp.inner.for.end100:
26136 // CHECK29-NEXT:    store i8 96, i8* [[IT72]], align 1
26137 // CHECK29-NEXT:    [[TMP58:%.*]] = load i32, i32* [[A]], align 4
26138 // CHECK29-NEXT:    [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
26139 // CHECK29-NEXT:    call void @llvm.stackrestore(i8* [[TMP59]])
26140 // CHECK29-NEXT:    ret i32 [[TMP58]]
26141 //
26142 //
26143 // CHECK29-LABEL: define {{[^@]+}}@_Z3bari
26144 // CHECK29-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
26145 // CHECK29-NEXT:  entry:
26146 // CHECK29-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26147 // CHECK29-NEXT:    [[A:%.*]] = alloca i32, align 4
26148 // CHECK29-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
26149 // CHECK29-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26150 // CHECK29-NEXT:    store i32 0, i32* [[A]], align 4
26151 // CHECK29-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
26152 // CHECK29-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
26153 // CHECK29-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
26154 // CHECK29-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
26155 // CHECK29-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
26156 // CHECK29-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
26157 // CHECK29-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]])
26158 // CHECK29-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
26159 // CHECK29-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
26160 // CHECK29-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
26161 // CHECK29-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
26162 // CHECK29-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
26163 // CHECK29-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
26164 // CHECK29-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
26165 // CHECK29-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
26166 // CHECK29-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
26167 // CHECK29-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
26168 // CHECK29-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
26169 // CHECK29-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
26170 // CHECK29-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
26171 // CHECK29-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
26172 // CHECK29-NEXT:    ret i32 [[TMP8]]
26173 //
26174 //
// CHECK29-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK29-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK29-NEXT:  entry:
// CHECK29-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK29-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK29-NEXT:    [[B:%.*]] = alloca i32, align 4
// CHECK29-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK29-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK29-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK29-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK29-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK29-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK29-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK29-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK29-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK29-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK29-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK29-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK29-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK29-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
// CHECK29-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK29-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK29-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK29-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK29-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK29-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK29-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK29-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK29-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
// CHECK29-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
// CHECK29-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK29-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK29-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK29-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK29-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK29-NEXT:    [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK29-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK29-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK29:       omp_if.then:
// CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK29:       omp.inner.for.cond:
// CHECK29-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK29-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
// CHECK29-NEXT:    [[CMP2:%.*]] = icmp ule i64 [[TMP8]], [[TMP9]]
// CHECK29-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK29:       omp.inner.for.body:
// CHECK29-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK29-NEXT:    [[MUL:%.*]] = mul i64 [[TMP10]], 400
// CHECK29-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK29-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
// CHECK29-NEXT:    [[TMP11:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
// CHECK29-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP11]] to double
// CHECK29-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK29-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK29-NEXT:    store double [[ADD3]], double* [[A]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK29-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK29-NEXT:    [[TMP12:%.*]] = load double, double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK29-NEXT:    [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00
// CHECK29-NEXT:    store double [[INC]], double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK29-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK29-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK29-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
// CHECK29-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK29-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !18
// CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK29:       omp.body.continue:
// CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK29:       omp.inner.for.inc:
// CHECK29-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK29-NEXT:    [[ADD7:%.*]] = add i64 [[TMP14]], 1
// CHECK29-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK29:       omp.inner.for.end:
// CHECK29-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK29:       omp_if.else:
// CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND8:%.*]]
// CHECK29:       omp.inner.for.cond8:
// CHECK29-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK29-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK29-NEXT:    [[CMP9:%.*]] = icmp ule i64 [[TMP15]], [[TMP16]]
// CHECK29-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]]
// CHECK29:       omp.inner.for.body10:
// CHECK29-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK29-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP17]], 400
// CHECK29-NEXT:    [[SUB12:%.*]] = sub i64 2000, [[MUL11]]
// CHECK29-NEXT:    store i64 [[SUB12]], i64* [[IT]], align 8
// CHECK29-NEXT:    [[TMP18:%.*]] = load i32, i32* [[B]], align 4
// CHECK29-NEXT:    [[CONV13:%.*]] = sitofp i32 [[TMP18]] to double
// CHECK29-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00
// CHECK29-NEXT:    [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK29-NEXT:    store double [[ADD14]], double* [[A15]], align 8
// CHECK29-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK29-NEXT:    [[TMP19:%.*]] = load double, double* [[A16]], align 8
// CHECK29-NEXT:    [[INC17:%.*]] = fadd double [[TMP19]], 1.000000e+00
// CHECK29-NEXT:    store double [[INC17]], double* [[A16]], align 8
// CHECK29-NEXT:    [[CONV18:%.*]] = fptosi double [[INC17]] to i16
// CHECK29-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK29-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP20]]
// CHECK29-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i64 1
// CHECK29-NEXT:    store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2
// CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE21:%.*]]
// CHECK29:       omp.body.continue21:
// CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC22:%.*]]
// CHECK29:       omp.inner.for.inc22:
// CHECK29-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK29-NEXT:    [[ADD23:%.*]] = add i64 [[TMP21]], 1
// CHECK29-NEXT:    store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8
// CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK29:       omp.inner.for.end24:
// CHECK29-NEXT:    br label [[OMP_IF_END]]
// CHECK29:       omp_if.end:
// CHECK29-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK29-NEXT:    [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK29-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP22]]
// CHECK29-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i64 1
// CHECK29-NEXT:    [[TMP23:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
// CHECK29-NEXT:    [[CONV27:%.*]] = sext i16 [[TMP23]] to i32
// CHECK29-NEXT:    [[TMP24:%.*]] = load i32, i32* [[B]], align 4
// CHECK29-NEXT:    [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP24]]
// CHECK29-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK29-NEXT:    call void @llvm.stackrestore(i8* [[TMP25]])
// CHECK29-NEXT:    ret i32 [[ADD28]]
//
//
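// For fstatic below, only the precomputed bounds and the return of 'a' are
// checked; no loop blocks appear. The stored upper bound 429496720 looks
// like a wrapped unsigned trip count for a loop whose condition is
// statically false, so CodeGen folds the zero-trip precondition away.
// (This reading is inferred from the emitted constants, not restated from
// the source.)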
// CHECK29-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK29-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK29-NEXT:  entry:
// CHECK29-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK29-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK29-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK29-NEXT:    [[AAA:%.*]] = alloca i8, align 1
// CHECK29-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK29-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK29-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK29-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK29-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK29-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK29-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK29-NEXT:    store i8 0, i8* [[AAA]], align 1
// CHECK29-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK29-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK29-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT:    ret i32 [[TMP0]]
//
//
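// ftemplate<int> below runs its simd loop unconditionally: seven iterations
// (ub = 6) with i = -10 + 3*iv, bumping a, aa, and b[2] each time; the
// post-loop store of i = 11 matches the final value -10 + 3*7 required for
// the loop iteration variable after the last iteration.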
// CHECK29-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK29-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK29-NEXT:  entry:
// CHECK29-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK29-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK29-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK29-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK29-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK29-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK29-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK29-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK29-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK29-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK29-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK29-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK29-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK29-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK29-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK29-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK29:       omp.inner.for.cond:
// CHECK29-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK29-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !24
// CHECK29-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK29-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK29:       omp.inner.for.body:
// CHECK29-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK29-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK29-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK29-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !24
// CHECK29-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24
// CHECK29-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK29-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !24
// CHECK29-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24
// CHECK29-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK29-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK29-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK29-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !24
// CHECK29-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK29-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK29-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK29-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK29-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK29:       omp.body.continue:
// CHECK29-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK29:       omp.inner.for.inc:
// CHECK29-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK29-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK29-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK29-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK29:       omp.inner.for.end:
// CHECK29-NEXT:    store i64 11, i64* [[I]], align 8
// CHECK29-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK29-NEXT:    ret i32 [[TMP8]]
//
//
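// The CHECK30 block that follows mirrors the CHECK29 expectations line for
// line; it belongs to the companion run that compiles the same translation
// unit through a precompiled header, so identical IR is expected.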
// CHECK30-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK30-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK30-NEXT:  entry:
// CHECK30-NEXT:    ret i64 0
//
//
// CHECK30-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK30-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK30-NEXT:  entry:
// CHECK30-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK30-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
// CHECK30-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK30-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK30-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK30-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[K:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[I7:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[K8:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[LIN:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[_TMP20:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[A28:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
// CHECK30-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[IT53:%.*]] = alloca i16, align 2
// CHECK30-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
// CHECK30-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[IT72:%.*]] = alloca i8, align 1
// CHECK30-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK30-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK30-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK30-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK30-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK30-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK30-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK30-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK30-NEXT:    [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK30-NEXT:    [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK30-NEXT:    [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK30-NEXT:    store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK30-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK30-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK30-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK30-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK30:       omp.inner.for.cond:
// CHECK30-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK30-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
// CHECK30-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK30-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK30:       omp.inner.for.body:
// CHECK30-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK30-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5
// CHECK30-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK30-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
// CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK30:       omp.body.continue:
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK30:       omp.inner.for.inc:
// CHECK30-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK30-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK30-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK30:       omp.inner.for.end:
// CHECK30-NEXT:    store i32 33, i32* [[I]], align 4
// CHECK30-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK30-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
// CHECK30-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
// CHECK30-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
// CHECK30-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
// CHECK30-NEXT:    store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4
// CHECK30-NEXT:    [[TMP12:%.*]] = load i64, i64* [[K]], align 8
// CHECK30-NEXT:    store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
// CHECK30:       omp.inner.for.cond9:
// CHECK30-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK30-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6
// CHECK30-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK30-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
// CHECK30:       omp.inner.for.body11:
// CHECK30-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK30-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK30-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK30-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !6
// CHECK30-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !6
// CHECK30-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK30-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
// CHECK30-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK30-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
// CHECK30-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !6
// CHECK30-NEXT:    [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !6
// CHECK30-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK30-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !6
// CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK30:       omp.body.continue16:
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK30:       omp.inner.for.inc17:
// CHECK30-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK30-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK30-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK30:       omp.inner.for.end19:
// CHECK30-NEXT:    store i32 1, i32* [[I7]], align 4
// CHECK30-NEXT:    [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
// CHECK30-NEXT:    store i64 [[TMP20]], i64* [[K]], align 8
// CHECK30-NEXT:    store i32 12, i32* [[LIN]], align 4
// CHECK30-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK30-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK30-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK30-NEXT:    store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
// CHECK30-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK30-NEXT:    store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
// CHECK30-NEXT:    [[TMP23:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT:    store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
// CHECK30-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK30-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK30:       omp.inner.for.cond29:
// CHECK30-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK30-NEXT:    [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !9
// CHECK30-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
// CHECK30-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK30:       omp.inner.for.body31:
// CHECK30-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK30-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP26]], 400
// CHECK30-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK30-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !9
// CHECK30-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !9
// CHECK30-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
// CHECK30-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK30-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK30-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
// CHECK30-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK30-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK30-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !9
// CHECK30-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !9
// CHECK30-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
// CHECK30-NEXT:    [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK30-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !9
// CHECK30-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
// CHECK30-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK30-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK30-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !9
// CHECK30-NEXT:    [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !9
// CHECK30-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
// CHECK30-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK30-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK30-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !9
// CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK30:       omp.body.continue45:
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK30:       omp.inner.for.inc46:
// CHECK30-NEXT:    [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK30-NEXT:    [[ADD47:%.*]] = add i64 [[TMP34]], 1
// CHECK30-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !9
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK30:       omp.inner.for.end48:
// CHECK30-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK30-NEXT:    [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK30-NEXT:    store i32 [[TMP35]], i32* [[LIN]], align 4
// CHECK30-NEXT:    [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
// CHECK30-NEXT:    store i32 [[TMP36]], i32* [[A]], align 4
// CHECK30-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK30-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK30-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK30-NEXT:    store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK30:       omp.inner.for.cond54:
// CHECK30-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK30-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !12
// CHECK30-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
// CHECK30-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK30:       omp.inner.for.body56:
// CHECK30-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK30-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
// CHECK30-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK30-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK30-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !12
// CHECK30-NEXT:    [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !12
// CHECK30-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK30-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !12
// CHECK30-NEXT:    [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !12
// CHECK30-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
// CHECK30-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK30-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK30-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !12
// CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK30:       omp.body.continue64:
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK30:       omp.inner.for.inc65:
// CHECK30-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK30-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
// CHECK30-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !12
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK30:       omp.inner.for.end67:
// CHECK30-NEXT:    store i16 22, i16* [[IT53]], align 2
// CHECK30-NEXT:    [[TMP44:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT:    store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK30-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK30-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK30-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK30-NEXT:    store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK30:       omp.inner.for.cond73:
// CHECK30-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK30-NEXT:    [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !15
// CHECK30-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
// CHECK30-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK30:       omp.inner.for.body75:
// CHECK30-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK30-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
// CHECK30-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK30-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK30-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !15
// CHECK30-NEXT:    [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !15
// CHECK30-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
// CHECK30-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !15
// CHECK30-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK30-NEXT:    [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK30-NEXT:    [[CONV80:%.*]] = fpext float [[TMP50]] to double
// CHECK30-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK30-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK30-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK30-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK30-NEXT:    [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK30-NEXT:    [[CONV84:%.*]] = fpext float [[TMP51]] to double
// CHECK30-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
// CHECK30-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
// CHECK30-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !15
// CHECK30-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK30-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2
// CHECK30-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK30-NEXT:    [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK30-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !15
// CHECK30-NEXT:    [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK30-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]]
// CHECK30-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3
// CHECK30-NEXT:    [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK30-NEXT:    [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00
// CHECK30-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !15
// CHECK30-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK30-NEXT:    [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group !15
// CHECK30-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1
// CHECK30-NEXT:    store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group !15
// CHECK30-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK30-NEXT:    [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group !15
// CHECK30-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP56]] to i32
// CHECK30-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK30-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK30-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group !15
// CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK30:       omp.body.continue97:
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK30:       omp.inner.for.inc98:
// CHECK30-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK30-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1
// CHECK30-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !15
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK30:       omp.inner.for.end100:
// CHECK30-NEXT:    store i8 96, i8* [[IT72]], align 1
// CHECK30-NEXT:    [[TMP58:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT:    [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK30-NEXT:    call void @llvm.stackrestore(i8* [[TMP59]])
// CHECK30-NEXT:    ret i32 [[TMP58]]
//
//
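// In foo above, the linear lowering is visible in the DOTLINEAR_START and
// DOTLINEAR_STEP temporaries: each iteration recomputes k = start + 3*iv
// and lin/a = start + iv*step (step from @_Z7get_valv), and the privatized
// final values are copied back to k, lin, and a once the corresponding
// omp.inner.for.end block is reached.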
// CHECK30-LABEL: define {{[^@]+}}@_Z3bari
// CHECK30-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK30-NEXT:  entry:
// CHECK30-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK30-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK30-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK30-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
// CHECK30-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK30-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
// CHECK30-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT:    [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]])
// CHECK30-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK30-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
// CHECK30-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT:    [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
// CHECK30-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK30-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
// CHECK30-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT:    [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
// CHECK30-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK30-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
// CHECK30-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT:    ret i32 [[TMP8]]
//
//
// CHECK30-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK30-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK30-NEXT:  entry:
// CHECK30-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK30-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[B:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK30-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK30-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK30-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK30-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK30-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK30-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
// CHECK30-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK30-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK30-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK30-NEXT:    [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK30-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK30-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK30-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK30-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60
// CHECK30-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
// CHECK30-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK30-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK30-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK30-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK30-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK30-NEXT:    [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK30-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK30-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK30:       omp_if.then:
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK30:       omp.inner.for.cond:
// CHECK30-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK30-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !18
// CHECK30-NEXT:    [[CMP2:%.*]] = icmp ule i64 [[TMP8]], [[TMP9]]
// CHECK30-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK30:       omp.inner.for.body:
// CHECK30-NEXT:    [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK30-NEXT:    [[MUL:%.*]] = mul i64 [[TMP10]], 400
// CHECK30-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK30-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !18
// CHECK30-NEXT:    [[TMP11:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !18
// CHECK30-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP11]] to double
// CHECK30-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK30-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK30-NEXT:    store double [[ADD3]], double* [[A]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK30-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK30-NEXT:    [[TMP12:%.*]] = load double, double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK30-NEXT:    [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00
// CHECK30-NEXT:    store double [[INC]], double* [[A4]], align 8, !nontemporal !19, !llvm.access.group !18
// CHECK30-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK30-NEXT:    [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK30-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
// CHECK30-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK30-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !18
// CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK30:       omp.body.continue:
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK30:       omp.inner.for.inc:
// CHECK30-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK30-NEXT:    [[ADD7:%.*]] = add i64 [[TMP14]], 1
// CHECK30-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !18
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK30:       omp.inner.for.end:
// CHECK30-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK30:       omp_if.else:
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND8:%.*]]
// CHECK30:       omp.inner.for.cond8:
// CHECK30-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK30-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK30-NEXT:    [[CMP9:%.*]] = icmp ule i64 [[TMP15]], [[TMP16]]
// CHECK30-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]]
// CHECK30:       omp.inner.for.body10:
// CHECK30-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK30-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP17]], 400
// CHECK30-NEXT:    [[SUB12:%.*]] = sub i64 2000, [[MUL11]]
// CHECK30-NEXT:    store i64 [[SUB12]], i64* [[IT]], align 8
// CHECK30-NEXT:    [[TMP18:%.*]] = load i32, i32* [[B]], align 4
// CHECK30-NEXT:    [[CONV13:%.*]] = sitofp i32 [[TMP18]] to double
// CHECK30-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00
// CHECK30-NEXT:    [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK30-NEXT:    store double [[ADD14]], double* [[A15]], align 8
// CHECK30-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK30-NEXT:    [[TMP19:%.*]] = load double, double* [[A16]], align 8
// CHECK30-NEXT:    [[INC17:%.*]] = fadd double [[TMP19]], 1.000000e+00
// CHECK30-NEXT:    store double [[INC17]], double* [[A16]], align 8
// CHECK30-NEXT:    [[CONV18:%.*]] = fptosi double [[INC17]] to i16
// CHECK30-NEXT:    [[TMP20:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK30-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP20]]
// CHECK30-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i64 1
// CHECK30-NEXT:    store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2
// CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE21:%.*]]
// CHECK30:       omp.body.continue21:
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC22:%.*]]
// CHECK30:       omp.inner.for.inc22:
// CHECK30-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK30-NEXT:    [[ADD23:%.*]] = add i64 [[TMP21]], 1
// CHECK30-NEXT:    store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK30:       omp.inner.for.end24:
// CHECK30-NEXT:    br label [[OMP_IF_END]]
// CHECK30:       omp_if.end:
// CHECK30-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK30-NEXT:    [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK30-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP22]]
// CHECK30-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i64 1
// CHECK30-NEXT:    [[TMP23:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
// CHECK30-NEXT:    [[CONV27:%.*]] = sext i16 [[TMP23]] to i32
// CHECK30-NEXT:    [[TMP24:%.*]] = load i32, i32* [[B]], align 4
// CHECK30-NEXT:    [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP24]]
// CHECK30-NEXT:    [[TMP25:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK30-NEXT:    call void @llvm.stackrestore(i8* [[TMP25]])
// CHECK30-NEXT:    ret i32 [[ADD28]]
//
//
// CHECK30-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK30-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK30-NEXT:  entry:
// CHECK30-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK30-NEXT:    [[AAA:%.*]] = alloca i8, align 1
// CHECK30-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK30-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK30-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK30-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK30-NEXT:    store i8 0, i8* [[AAA]], align 1
// CHECK30-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK30-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK30-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT:    ret i32 [[TMP0]]
//
//
// CHECK30-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK30-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK30-NEXT:  entry:
// CHECK30-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK30-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK30-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK30-NEXT:    [[TMP:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK30-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK30-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK30-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK30-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK30-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK30-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK30-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK30:       omp.inner.for.cond:
// CHECK30-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK30-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !24
// CHECK30-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK30-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK30:       omp.inner.for.body:
// CHECK30-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK30-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK30-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK30-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !24
// CHECK30-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !24
// CHECK30-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK30-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !24
// CHECK30-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !24
// CHECK30-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK30-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK30-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK30-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !24
// CHECK30-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK30-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK30-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK30-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK30-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK30:       omp.body.continue:
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK30:       omp.inner.for.inc:
// CHECK30-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK30-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK30-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !24
// CHECK30-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK30:       omp.inner.for.end:
// CHECK30-NEXT:    store i64 11, i64* [[I]], align 8
// CHECK30-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK30-NEXT:    ret i32 [[TMP8]]
//
//
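// CHECK31 switches to the 32-bit (i386) expectations: the i32 argument
// loses the signext attribute, saved stack pointers are align 4, and VLA
// extents are i32 rather than i64, but the loop structure tracks the
// 64-bit checks above.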
26902 // CHECK31-LABEL: define {{[^@]+}}@_Z7get_valv
26903 // CHECK31-SAME: () #[[ATTR0:[0-9]+]] {
26904 // CHECK31-NEXT:  entry:
26905 // CHECK31-NEXT:    ret i64 0
26906 //
26907 //
26908 // CHECK31-LABEL: define {{[^@]+}}@_Z3fooi
26909 // CHECK31-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
26910 // CHECK31-NEXT:  entry:
26911 // CHECK31-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
26912 // CHECK31-NEXT:    [[A:%.*]] = alloca i32, align 4
26913 // CHECK31-NEXT:    [[AA:%.*]] = alloca i16, align 2
26914 // CHECK31-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
26915 // CHECK31-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
26916 // CHECK31-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
26917 // CHECK31-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
26918 // CHECK31-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
26919 // CHECK31-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
26920 // CHECK31-NEXT:    [[TMP:%.*]] = alloca i32, align 4
26921 // CHECK31-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
26922 // CHECK31-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
26923 // CHECK31-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
26924 // CHECK31-NEXT:    [[I:%.*]] = alloca i32, align 4
26925 // CHECK31-NEXT:    [[K:%.*]] = alloca i64, align 8
26926 // CHECK31-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
26927 // CHECK31-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
26928 // CHECK31-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
26929 // CHECK31-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
26930 // CHECK31-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
26931 // CHECK31-NEXT:    [[I7:%.*]] = alloca i32, align 4
26932 // CHECK31-NEXT:    [[K8:%.*]] = alloca i64, align 8
26933 // CHECK31-NEXT:    [[LIN:%.*]] = alloca i32, align 4
26934 // CHECK31-NEXT:    [[_TMP20:%.*]] = alloca i64, align 4
26935 // CHECK31-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
26936 // CHECK31-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
26937 // CHECK31-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
26938 // CHECK31-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
26939 // CHECK31-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
26940 // CHECK31-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
26941 // CHECK31-NEXT:    [[IT:%.*]] = alloca i64, align 8
26942 // CHECK31-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
26943 // CHECK31-NEXT:    [[A28:%.*]] = alloca i32, align 4
26944 // CHECK31-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
26945 // CHECK31-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
26946 // CHECK31-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
26947 // CHECK31-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
26948 // CHECK31-NEXT:    [[IT53:%.*]] = alloca i16, align 2
26949 // CHECK31-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
26950 // CHECK31-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
26951 // CHECK31-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
26952 // CHECK31-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
26953 // CHECK31-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
26954 // CHECK31-NEXT:    [[IT72:%.*]] = alloca i8, align 1
26955 // CHECK31-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
26956 // CHECK31-NEXT:    store i32 0, i32* [[A]], align 4
26957 // CHECK31-NEXT:    store i16 0, i16* [[AA]], align 2
26958 // CHECK31-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
26959 // CHECK31-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
26960 // CHECK31-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
26961 // CHECK31-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
26962 // CHECK31-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
26963 // CHECK31-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
26964 // CHECK31-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
26965 // CHECK31-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
26966 // CHECK31-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
26967 // CHECK31-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
26968 // CHECK31-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
26969 // CHECK31-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
26970 // CHECK31-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
26971 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
26972 // CHECK31:       omp.inner.for.cond:
26973 // CHECK31-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
26974 // CHECK31-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
26975 // CHECK31-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
26976 // CHECK31-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
26977 // CHECK31:       omp.inner.for.body:
26978 // CHECK31-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
26979 // CHECK31-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
26980 // CHECK31-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
26981 // CHECK31-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
26982 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
26983 // CHECK31:       omp.body.continue:
26984 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
26985 // CHECK31:       omp.inner.for.inc:
26986 // CHECK31-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
26987 // CHECK31-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
26988 // CHECK31-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
26989 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
26990 // CHECK31:       omp.inner.for.end:
26991 // CHECK31-NEXT:    store i32 33, i32* [[I]], align 4
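//
// The assertions above cover the first simd-lowered loop in this function:
// 6 iterations (iv 0..5), i = 3 + 5*iv, with the final value i = 33 written
// back after the loop and no body beyond the induction-variable store. A
// plausible source-level sketch (the pragma spelling and bounds are inferred
// from the IR, not the verified original):
//
//   #pragma omp target parallel for simd
//   for (int i = 3; i < 32; i += 5) {
//   }
//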
26992 // CHECK31-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
26993 // CHECK31-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
26994 // CHECK31-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
26995 // CHECK31-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
26996 // CHECK31-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
26997 // CHECK31-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4
26998 // CHECK31-NEXT:    [[TMP10:%.*]] = load i64, i64* [[K]], align 8
26999 // CHECK31-NEXT:    store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8
27000 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
27001 // CHECK31:       omp.inner.for.cond9:
27002 // CHECK31-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
27003 // CHECK31-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7
27004 // CHECK31-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
27005 // CHECK31-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
27006 // CHECK31:       omp.inner.for.body11:
27007 // CHECK31-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
27008 // CHECK31-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1
27009 // CHECK31-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
27010 // CHECK31-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7
27011 // CHECK31-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7
27012 // CHECK31-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
27013 // CHECK31-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3
27014 // CHECK31-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
27015 // CHECK31-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]]
27016 // CHECK31-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7
27017 // CHECK31-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7
27018 // CHECK31-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
27019 // CHECK31-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7
27020 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
27021 // CHECK31:       omp.body.continue16:
27022 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
27023 // CHECK31:       omp.inner.for.inc17:
27024 // CHECK31-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
27025 // CHECK31-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
27026 // CHECK31-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
27027 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]]
27028 // CHECK31:       omp.inner.for.end19:
27029 // CHECK31-NEXT:    store i32 1, i32* [[I7]], align 4
27030 // CHECK31-NEXT:    [[TMP18:%.*]] = load i64, i64* [[K8]], align 8
27031 // CHECK31-NEXT:    store i64 [[TMP18]], i64* [[K]], align 8
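//
// This block checks a loop with a linear variable of constant step 3: k is
// initialized from get_val(), each iteration recomputes the privatized copy
// as .linear.start + 3*iv, and both the final i (1) and the last k are
// copied back on exit. A hedged sketch of source that lowers this way
// (identifier names are assumptions):
//
//   long long k = get_val();
//   #pragma omp target simd linear(k : 3)
//   for (int i = 10; i > 1; i--) {
//     a += 1;
//   }
//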
27032 // CHECK31-NEXT:    store i32 12, i32* [[LIN]], align 4
27033 // CHECK31-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
27034 // CHECK31-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
27035 // CHECK31-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
27036 // CHECK31-NEXT:    store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8
27037 // CHECK31-NEXT:    [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4
27038 // CHECK31-NEXT:    store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4
27039 // CHECK31-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
27040 // CHECK31-NEXT:    store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4
27041 // CHECK31-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
27042 // CHECK31-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
27043 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
27044 // CHECK31:       omp.inner.for.cond29:
27045 // CHECK31-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
27046 // CHECK31-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10
27047 // CHECK31-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
27048 // CHECK31-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
27049 // CHECK31:       omp.inner.for.body31:
27050 // CHECK31-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
27051 // CHECK31-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP24]], 400
27052 // CHECK31-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
27053 // CHECK31-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10
27054 // CHECK31-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10
27055 // CHECK31-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP25]] to i64
27056 // CHECK31-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
27057 // CHECK31-NEXT:    [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
27058 // CHECK31-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]]
27059 // CHECK31-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
27060 // CHECK31-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
27061 // CHECK31-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10
27062 // CHECK31-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10
27063 // CHECK31-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP28]] to i64
27064 // CHECK31-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
27065 // CHECK31-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
27066 // CHECK31-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]]
27067 // CHECK31-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
27068 // CHECK31-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
27069 // CHECK31-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10
27070 // CHECK31-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
27071 // CHECK31-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP31]] to i32
27072 // CHECK31-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
27073 // CHECK31-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
27074 // CHECK31-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10
27075 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
27076 // CHECK31:       omp.body.continue45:
27077 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
27078 // CHECK31:       omp.inner.for.inc46:
27079 // CHECK31-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
27080 // CHECK31-NEXT:    [[ADD47:%.*]] = add i64 [[TMP32]], 1
27081 // CHECK31-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
27082 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]]
27083 // CHECK31:       omp.inner.for.end48:
27084 // CHECK31-NEXT:    store i64 400, i64* [[IT]], align 8
27085 // CHECK31-NEXT:    [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4
27086 // CHECK31-NEXT:    store i32 [[TMP33]], i32* [[LIN]], align 4
27087 // CHECK31-NEXT:    [[TMP34:%.*]] = load i32, i32* [[A28]], align 4
27088 // CHECK31-NEXT:    store i32 [[TMP34]], i32* [[A]], align 4
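//
// Here two linear variables (lin and a) share a runtime step obtained from
// get_val(): each iteration recomputes the privatized copies as
// start + iv*step and bumps aa, and the final values are copied back after
// the 4-iteration loop (it = 2000, 1600, 1200, 800; final it = 400). A
// plausible reconstruction (hypothetical form, inferred from the IR):
//
//   int lin = 12;
//   #pragma omp target simd linear(lin, a : get_val())
//   for (unsigned long long it = 2000; it >= 600; it -= 400) {
//     aa += 1;
//   }
//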
27089 // CHECK31-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
27090 // CHECK31-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
27091 // CHECK31-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
27092 // CHECK31-NEXT:    store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4
27093 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
27094 // CHECK31:       omp.inner.for.cond54:
27095 // CHECK31-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
27096 // CHECK31-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13
27097 // CHECK31-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]]
27098 // CHECK31-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
27099 // CHECK31:       omp.inner.for.body56:
27100 // CHECK31-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
27101 // CHECK31-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4
27102 // CHECK31-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
27103 // CHECK31-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
27104 // CHECK31-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13
27105 // CHECK31-NEXT:    [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
27106 // CHECK31-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1
27107 // CHECK31-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13
27108 // CHECK31-NEXT:    [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
27109 // CHECK31-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP40]] to i32
27110 // CHECK31-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
27111 // CHECK31-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
27112 // CHECK31-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13
27113 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
27114 // CHECK31:       omp.body.continue64:
27115 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
27116 // CHECK31:       omp.inner.for.inc65:
27117 // CHECK31-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
27118 // CHECK31-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1
27119 // CHECK31-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
27120 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]]
27121 // CHECK31:       omp.inner.for.end67:
27122 // CHECK31-NEXT:    store i16 22, i16* [[IT53]], align 2
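//
// The preceding checks cover a 4-iteration loop over a 16-bit induction
// variable: it = 6 + 4*iv (6, 10, 14, 18), final value 22, with a and aa
// each incremented per iteration. A hedged source sketch (bounds inferred
// from the trip count and final store):
//
//   #pragma omp target simd
//   for (unsigned short it = 6; it <= 20; it += 4) {
//     a += 1;
//     aa += 1;
//   }
//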
27123 // CHECK31-NEXT:    [[TMP42:%.*]] = load i32, i32* [[A]], align 4
27124 // CHECK31-NEXT:    store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4
27125 // CHECK31-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
27126 // CHECK31-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
27127 // CHECK31-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
27128 // CHECK31-NEXT:    store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4
27129 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
27130 // CHECK31:       omp.inner.for.cond73:
27131 // CHECK31-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
27132 // CHECK31-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16
27133 // CHECK31-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]]
27134 // CHECK31-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
27135 // CHECK31:       omp.inner.for.body75:
27136 // CHECK31-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
27137 // CHECK31-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1
27138 // CHECK31-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
27139 // CHECK31-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
27140 // CHECK31-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16
27141 // CHECK31-NEXT:    [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
27142 // CHECK31-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1
27143 // CHECK31-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16
27144 // CHECK31-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
27145 // CHECK31-NEXT:    [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
27146 // CHECK31-NEXT:    [[CONV80:%.*]] = fpext float [[TMP48]] to double
27147 // CHECK31-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
27148 // CHECK31-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
27149 // CHECK31-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
27150 // CHECK31-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
27151 // CHECK31-NEXT:    [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
27152 // CHECK31-NEXT:    [[CONV84:%.*]] = fpext float [[TMP49]] to double
27153 // CHECK31-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
27154 // CHECK31-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
27155 // CHECK31-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
27156 // CHECK31-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
27157 // CHECK31-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2
27158 // CHECK31-NEXT:    [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
27159 // CHECK31-NEXT:    [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00
27160 // CHECK31-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
27161 // CHECK31-NEXT:    [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]]
27162 // CHECK31-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]]
27163 // CHECK31-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3
27164 // CHECK31-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
27165 // CHECK31-NEXT:    [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00
27166 // CHECK31-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
27167 // CHECK31-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
27168 // CHECK31-NEXT:    [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
27169 // CHECK31-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1
27170 // CHECK31-NEXT:    store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16
27171 // CHECK31-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
27172 // CHECK31-NEXT:    [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
27173 // CHECK31-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP54]] to i32
27174 // CHECK31-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
27175 // CHECK31-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
27176 // CHECK31-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16
27177 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
27178 // CHECK31:       omp.body.continue97:
27179 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
27180 // CHECK31:       omp.inner.for.inc98:
27181 // CHECK31-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
27182 // CHECK31-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1
27183 // CHECK31-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
27184 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]]
27185 // CHECK31:       omp.inner.for.end100:
27186 // CHECK31-NEXT:    store i8 96, i8* [[IT72]], align 1
27187 // CHECK31-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A]], align 4
27188 // CHECK31-NEXT:    [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
27189 // CHECK31-NEXT:    call void @llvm.stackrestore(i8* [[TMP57]])
27190 // CHECK31-NEXT:    ret i32 [[TMP56]]
27191 //
27192 //
27193 // CHECK31-LABEL: define {{[^@]+}}@_Z3bari
27194 // CHECK31-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
27195 // CHECK31-NEXT:  entry:
27196 // CHECK31-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27197 // CHECK31-NEXT:    [[A:%.*]] = alloca i32, align 4
27198 // CHECK31-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
27199 // CHECK31-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27200 // CHECK31-NEXT:    store i32 0, i32* [[A]], align 4
27201 // CHECK31-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
27202 // CHECK31-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
27203 // CHECK31-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
27204 // CHECK31-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
27205 // CHECK31-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
27206 // CHECK31-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
27207 // CHECK31-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]])
27208 // CHECK31-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
27209 // CHECK31-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
27210 // CHECK31-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
27211 // CHECK31-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
27212 // CHECK31-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
27213 // CHECK31-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
27214 // CHECK31-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
27215 // CHECK31-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
27216 // CHECK31-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
27217 // CHECK31-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
27218 // CHECK31-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
27219 // CHECK31-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
27220 // CHECK31-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
27221 // CHECK31-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
27222 // CHECK31-NEXT:    ret i32 [[TMP8]]
27223 //
27224 //
27225 // CHECK31-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
27226 // CHECK31-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
27227 // CHECK31-NEXT:  entry:
27228 // CHECK31-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
27229 // CHECK31-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27230 // CHECK31-NEXT:    [[B:%.*]] = alloca i32, align 4
27231 // CHECK31-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
27232 // CHECK31-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
27233 // CHECK31-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
27234 // CHECK31-NEXT:    [[TMP:%.*]] = alloca i64, align 4
27235 // CHECK31-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
27236 // CHECK31-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
27237 // CHECK31-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
27238 // CHECK31-NEXT:    [[IT:%.*]] = alloca i64, align 8
27239 // CHECK31-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
27240 // CHECK31-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27241 // CHECK31-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
27242 // CHECK31-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
27243 // CHECK31-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
27244 // CHECK31-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
27245 // CHECK31-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
27246 // CHECK31-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
27247 // CHECK31-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
27248 // CHECK31-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
27249 // CHECK31-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
27250 // CHECK31-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
27251 // CHECK31-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
27252 // CHECK31-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60
27253 // CHECK31-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
27254 // CHECK31-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
27255 // CHECK31-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
27256 // CHECK31-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
27257 // CHECK31-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
27258 // CHECK31-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
27259 // CHECK31-NEXT:    [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
27260 // CHECK31-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
27261 // CHECK31-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
27262 // CHECK31:       omp_if.then:
27263 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27264 // CHECK31:       omp.inner.for.cond:
27265 // CHECK31-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
27266 // CHECK31-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19
27267 // CHECK31-NEXT:    [[CMP2:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
27268 // CHECK31-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27269 // CHECK31:       omp.inner.for.body:
27270 // CHECK31-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
27271 // CHECK31-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
27272 // CHECK31-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
27273 // CHECK31-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19
27274 // CHECK31-NEXT:    [[TMP10:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
27275 // CHECK31-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP10]] to double
27276 // CHECK31-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
27277 // CHECK31-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
27278 // CHECK31-NEXT:    store double [[ADD3]], double* [[A]], align 4, !nontemporal !20, !llvm.access.group !19
27279 // CHECK31-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
27280 // CHECK31-NEXT:    [[TMP11:%.*]] = load double, double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19
27281 // CHECK31-NEXT:    [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00
27282 // CHECK31-NEXT:    store double [[INC]], double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19
27283 // CHECK31-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
27284 // CHECK31-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
27285 // CHECK31-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
27286 // CHECK31-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
27287 // CHECK31-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !19
27288 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27289 // CHECK31:       omp.body.continue:
27290 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27291 // CHECK31:       omp.inner.for.inc:
27292 // CHECK31-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
27293 // CHECK31-NEXT:    [[ADD7:%.*]] = add i64 [[TMP13]], 1
27294 // CHECK31-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
27295 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
27296 // CHECK31:       omp.inner.for.end:
27297 // CHECK31-NEXT:    br label [[OMP_IF_END:%.*]]
27298 // CHECK31:       omp_if.else:
27299 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND8:%.*]]
27300 // CHECK31:       omp.inner.for.cond8:
27301 // CHECK31-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
27302 // CHECK31-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
27303 // CHECK31-NEXT:    [[CMP9:%.*]] = icmp ule i64 [[TMP14]], [[TMP15]]
27304 // CHECK31-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]]
27305 // CHECK31:       omp.inner.for.body10:
27306 // CHECK31-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
27307 // CHECK31-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP16]], 400
27308 // CHECK31-NEXT:    [[SUB12:%.*]] = sub i64 2000, [[MUL11]]
27309 // CHECK31-NEXT:    store i64 [[SUB12]], i64* [[IT]], align 8
27310 // CHECK31-NEXT:    [[TMP17:%.*]] = load i32, i32* [[B]], align 4
27311 // CHECK31-NEXT:    [[CONV13:%.*]] = sitofp i32 [[TMP17]] to double
27312 // CHECK31-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00
27313 // CHECK31-NEXT:    [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
27314 // CHECK31-NEXT:    store double [[ADD14]], double* [[A15]], align 4
27315 // CHECK31-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
27316 // CHECK31-NEXT:    [[TMP18:%.*]] = load double, double* [[A16]], align 4
27317 // CHECK31-NEXT:    [[INC17:%.*]] = fadd double [[TMP18]], 1.000000e+00
27318 // CHECK31-NEXT:    store double [[INC17]], double* [[A16]], align 4
27319 // CHECK31-NEXT:    [[CONV18:%.*]] = fptosi double [[INC17]] to i16
27320 // CHECK31-NEXT:    [[TMP19:%.*]] = mul nsw i32 1, [[TMP1]]
27321 // CHECK31-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP19]]
27322 // CHECK31-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i32 1
27323 // CHECK31-NEXT:    store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2
27324 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE21:%.*]]
27325 // CHECK31:       omp.body.continue21:
27326 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC22:%.*]]
27327 // CHECK31:       omp.inner.for.inc22:
27328 // CHECK31-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
27329 // CHECK31-NEXT:    [[ADD23:%.*]] = add i64 [[TMP20]], 1
27330 // CHECK31-NEXT:    store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8
27331 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP23:![0-9]+]]
27332 // CHECK31:       omp.inner.for.end24:
27333 // CHECK31-NEXT:    br label [[OMP_IF_END]]
27334 // CHECK31:       omp_if.end:
27335 // CHECK31-NEXT:    store i64 400, i64* [[IT]], align 8
27336 // CHECK31-NEXT:    [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]]
27337 // CHECK31-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP21]]
27338 // CHECK31-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1
27339 // CHECK31-NEXT:    [[TMP22:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
27340 // CHECK31-NEXT:    [[CONV27:%.*]] = sext i16 [[TMP22]] to i32
27341 // CHECK31-NEXT:    [[TMP23:%.*]] = load i32, i32* [[B]], align 4
27342 // CHECK31-NEXT:    [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP23]]
27343 // CHECK31-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
27344 // CHECK31-NEXT:    call void @llvm.stackrestore(i8* [[TMP24]])
27345 // CHECK31-NEXT:    ret i32 [[ADD28]]
27346 //
27347 //
27348 // CHECK31-LABEL: define {{[^@]+}}@_ZL7fstatici
27349 // CHECK31-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
27350 // CHECK31-NEXT:  entry:
27351 // CHECK31-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27352 // CHECK31-NEXT:    [[A:%.*]] = alloca i32, align 4
27353 // CHECK31-NEXT:    [[AA:%.*]] = alloca i16, align 2
27354 // CHECK31-NEXT:    [[AAA:%.*]] = alloca i8, align 1
27355 // CHECK31-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
27356 // CHECK31-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27357 // CHECK31-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27358 // CHECK31-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27359 // CHECK31-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27360 // CHECK31-NEXT:    store i32 0, i32* [[A]], align 4
27361 // CHECK31-NEXT:    store i16 0, i16* [[AA]], align 2
27362 // CHECK31-NEXT:    store i8 0, i8* [[AAA]], align 1
27363 // CHECK31-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27364 // CHECK31-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
27365 // CHECK31-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
27366 // CHECK31-NEXT:    ret i32 [[TMP0]]
27367 //
27368 //
27369 // CHECK31-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
27370 // CHECK31-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
27371 // CHECK31-NEXT:  entry:
27372 // CHECK31-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27373 // CHECK31-NEXT:    [[A:%.*]] = alloca i32, align 4
27374 // CHECK31-NEXT:    [[AA:%.*]] = alloca i16, align 2
27375 // CHECK31-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
27376 // CHECK31-NEXT:    [[TMP:%.*]] = alloca i64, align 4
27377 // CHECK31-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
27378 // CHECK31-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
27379 // CHECK31-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
27380 // CHECK31-NEXT:    [[I:%.*]] = alloca i64, align 8
27381 // CHECK31-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27382 // CHECK31-NEXT:    store i32 0, i32* [[A]], align 4
27383 // CHECK31-NEXT:    store i16 0, i16* [[AA]], align 2
27384 // CHECK31-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
27385 // CHECK31-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
27386 // CHECK31-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
27387 // CHECK31-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
27388 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27389 // CHECK31:       omp.inner.for.cond:
27390 // CHECK31-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
27391 // CHECK31-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !25
27392 // CHECK31-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
27393 // CHECK31-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27394 // CHECK31:       omp.inner.for.body:
27395 // CHECK31-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
27396 // CHECK31-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
27397 // CHECK31-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
27398 // CHECK31-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !25
27399 // CHECK31-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25
27400 // CHECK31-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
27401 // CHECK31-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !25
27402 // CHECK31-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25
27403 // CHECK31-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
27404 // CHECK31-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
27405 // CHECK31-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
27406 // CHECK31-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !25
27407 // CHECK31-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
27408 // CHECK31-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
27409 // CHECK31-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
27410 // CHECK31-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
27411 // CHECK31-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27412 // CHECK31:       omp.body.continue:
27413 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27414 // CHECK31:       omp.inner.for.inc:
27415 // CHECK31-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
27416 // CHECK31-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
27417 // CHECK31-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
27418 // CHECK31-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
27419 // CHECK31:       omp.inner.for.end:
27420 // CHECK31-NEXT:    store i64 11, i64* [[I]], align 8
27421 // CHECK31-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
27422 // CHECK31-NEXT:    ret i32 [[TMP8]]
27423 //
27424 //
27425 // CHECK32-LABEL: define {{[^@]+}}@_Z7get_valv
27426 // CHECK32-SAME: () #[[ATTR0:[0-9]+]] {
27427 // CHECK32-NEXT:  entry:
27428 // CHECK32-NEXT:    ret i64 0
27429 //
27430 //
27431 // CHECK32-LABEL: define {{[^@]+}}@_Z3fooi
27432 // CHECK32-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
27433 // CHECK32-NEXT:  entry:
27434 // CHECK32-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27435 // CHECK32-NEXT:    [[A:%.*]] = alloca i32, align 4
27436 // CHECK32-NEXT:    [[AA:%.*]] = alloca i16, align 2
27437 // CHECK32-NEXT:    [[B:%.*]] = alloca [10 x float], align 4
27438 // CHECK32-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
27439 // CHECK32-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
27440 // CHECK32-NEXT:    [[C:%.*]] = alloca [5 x [10 x double]], align 8
27441 // CHECK32-NEXT:    [[__VLA_EXPR1:%.*]] = alloca i32, align 4
27442 // CHECK32-NEXT:    [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
27443 // CHECK32-NEXT:    [[TMP:%.*]] = alloca i32, align 4
27444 // CHECK32-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
27445 // CHECK32-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
27446 // CHECK32-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
27447 // CHECK32-NEXT:    [[I:%.*]] = alloca i32, align 4
27448 // CHECK32-NEXT:    [[K:%.*]] = alloca i64, align 8
27449 // CHECK32-NEXT:    [[_TMP3:%.*]] = alloca i32, align 4
27450 // CHECK32-NEXT:    [[DOTOMP_LB4:%.*]] = alloca i32, align 4
27451 // CHECK32-NEXT:    [[DOTOMP_UB5:%.*]] = alloca i32, align 4
27452 // CHECK32-NEXT:    [[DOTOMP_IV6:%.*]] = alloca i32, align 4
27453 // CHECK32-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
27454 // CHECK32-NEXT:    [[I7:%.*]] = alloca i32, align 4
27455 // CHECK32-NEXT:    [[K8:%.*]] = alloca i64, align 8
27456 // CHECK32-NEXT:    [[LIN:%.*]] = alloca i32, align 4
27457 // CHECK32-NEXT:    [[_TMP20:%.*]] = alloca i64, align 4
27458 // CHECK32-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i64, align 8
27459 // CHECK32-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i64, align 8
27460 // CHECK32-NEXT:    [[DOTOMP_IV23:%.*]] = alloca i64, align 8
27461 // CHECK32-NEXT:    [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
27462 // CHECK32-NEXT:    [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
27463 // CHECK32-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
27464 // CHECK32-NEXT:    [[IT:%.*]] = alloca i64, align 8
27465 // CHECK32-NEXT:    [[LIN27:%.*]] = alloca i32, align 4
27466 // CHECK32-NEXT:    [[A28:%.*]] = alloca i32, align 4
27467 // CHECK32-NEXT:    [[_TMP49:%.*]] = alloca i16, align 2
27468 // CHECK32-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
27469 // CHECK32-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
27470 // CHECK32-NEXT:    [[DOTOMP_IV52:%.*]] = alloca i32, align 4
27471 // CHECK32-NEXT:    [[IT53:%.*]] = alloca i16, align 2
27472 // CHECK32-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
27473 // CHECK32-NEXT:    [[_TMP68:%.*]] = alloca i8, align 1
27474 // CHECK32-NEXT:    [[DOTOMP_LB69:%.*]] = alloca i32, align 4
27475 // CHECK32-NEXT:    [[DOTOMP_UB70:%.*]] = alloca i32, align 4
27476 // CHECK32-NEXT:    [[DOTOMP_IV71:%.*]] = alloca i32, align 4
27477 // CHECK32-NEXT:    [[IT72:%.*]] = alloca i8, align 1
27478 // CHECK32-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27479 // CHECK32-NEXT:    store i32 0, i32* [[A]], align 4
27480 // CHECK32-NEXT:    store i16 0, i16* [[AA]], align 2
27481 // CHECK32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
27482 // CHECK32-NEXT:    [[TMP1:%.*]] = call i8* @llvm.stacksave()
27483 // CHECK32-NEXT:    store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
27484 // CHECK32-NEXT:    [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
27485 // CHECK32-NEXT:    store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
27486 // CHECK32-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
27487 // CHECK32-NEXT:    [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
27488 // CHECK32-NEXT:    [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
27489 // CHECK32-NEXT:    store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
27490 // CHECK32-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
27491 // CHECK32-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
27492 // CHECK32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
27493 // CHECK32-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
27494 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
27495 // CHECK32:       omp.inner.for.cond:
27496 // CHECK32-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
27497 // CHECK32-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
27498 // CHECK32-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
27499 // CHECK32-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
27500 // CHECK32:       omp.inner.for.body:
27501 // CHECK32-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
27502 // CHECK32-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
27503 // CHECK32-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
27504 // CHECK32-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
27505 // CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
27506 // CHECK32:       omp.body.continue:
27507 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
27508 // CHECK32:       omp.inner.for.inc:
27509 // CHECK32-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
27510 // CHECK32-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
27511 // CHECK32-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
27512 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
27513 // CHECK32:       omp.inner.for.end:
27514 // CHECK32-NEXT:    store i32 33, i32* [[I]], align 4
27515 // CHECK32-NEXT:    [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
27516 // CHECK32-NEXT:    store i64 [[CALL]], i64* [[K]], align 8
27517 // CHECK32-NEXT:    store i32 0, i32* [[DOTOMP_LB4]], align 4
27518 // CHECK32-NEXT:    store i32 8, i32* [[DOTOMP_UB5]], align 4
27519 // CHECK32-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
27520 // CHECK32-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4
27521 // CHECK32-NEXT:    [[TMP10:%.*]] = load i64, i64* [[K]], align 8
27522 // CHECK32-NEXT:    store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8
27523 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND9:%.*]]
27524 // CHECK32:       omp.inner.for.cond9:
27525 // CHECK32-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
27526 // CHECK32-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7
27527 // CHECK32-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
27528 // CHECK32-NEXT:    br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
27529 // CHECK32:       omp.inner.for.body11:
27530 // CHECK32-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
27531 // CHECK32-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1
27532 // CHECK32-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
27533 // CHECK32-NEXT:    store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group !7
27534 // CHECK32-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !7
27535 // CHECK32-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
27536 // CHECK32-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3
27537 // CHECK32-NEXT:    [[CONV:%.*]] = sext i32 [[MUL13]] to i64
27538 // CHECK32-NEXT:    [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]]
27539 // CHECK32-NEXT:    store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group !7
27540 // CHECK32-NEXT:    [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !7
27541 // CHECK32-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
27542 // CHECK32-NEXT:    store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group !7
27543 // CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE16:%.*]]
27544 // CHECK32:       omp.body.continue16:
27545 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC17:%.*]]
27546 // CHECK32:       omp.inner.for.inc17:
27547 // CHECK32-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
27548 // CHECK32-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
27549 // CHECK32-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7
27550 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]]
27551 // CHECK32:       omp.inner.for.end19:
27552 // CHECK32-NEXT:    store i32 1, i32* [[I7]], align 4
27553 // CHECK32-NEXT:    [[TMP18:%.*]] = load i64, i64* [[K8]], align 8
27554 // CHECK32-NEXT:    store i64 [[TMP18]], i64* [[K]], align 8
27555 // CHECK32-NEXT:    store i32 12, i32* [[LIN]], align 4
27556 // CHECK32-NEXT:    store i64 0, i64* [[DOTOMP_LB21]], align 8
27557 // CHECK32-NEXT:    store i64 3, i64* [[DOTOMP_UB22]], align 8
27558 // CHECK32-NEXT:    [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
27559 // CHECK32-NEXT:    store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8
27560 // CHECK32-NEXT:    [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4
27561 // CHECK32-NEXT:    store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4
27562 // CHECK32-NEXT:    [[TMP21:%.*]] = load i32, i32* [[A]], align 4
27563 // CHECK32-NEXT:    store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4
27564 // CHECK32-NEXT:    [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
27565 // CHECK32-NEXT:    store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
27566 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND29:%.*]]
27567 // CHECK32:       omp.inner.for.cond29:
27568 // CHECK32-NEXT:    [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
27569 // CHECK32-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group !10
27570 // CHECK32-NEXT:    [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
27571 // CHECK32-NEXT:    br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
27572 // CHECK32:       omp.inner.for.body31:
27573 // CHECK32-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
27574 // CHECK32-NEXT:    [[MUL32:%.*]] = mul i64 [[TMP24]], 400
27575 // CHECK32-NEXT:    [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
27576 // CHECK32-NEXT:    store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group !10
27577 // CHECK32-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group !10
27578 // CHECK32-NEXT:    [[CONV34:%.*]] = sext i32 [[TMP25]] to i64
27579 // CHECK32-NEXT:    [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
27580 // CHECK32-NEXT:    [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
27581 // CHECK32-NEXT:    [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]]
27582 // CHECK32-NEXT:    [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
27583 // CHECK32-NEXT:    [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
27584 // CHECK32-NEXT:    store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group !10
27585 // CHECK32-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group !10
27586 // CHECK32-NEXT:    [[CONV38:%.*]] = sext i32 [[TMP28]] to i64
27587 // CHECK32-NEXT:    [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
27588 // CHECK32-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group !10
27589 // CHECK32-NEXT:    [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]]
27590 // CHECK32-NEXT:    [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
27591 // CHECK32-NEXT:    [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
27592 // CHECK32-NEXT:    store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group !10
27593 // CHECK32-NEXT:    [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !10
27594 // CHECK32-NEXT:    [[CONV42:%.*]] = sext i16 [[TMP31]] to i32
27595 // CHECK32-NEXT:    [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
27596 // CHECK32-NEXT:    [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
27597 // CHECK32-NEXT:    store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group !10
27598 // CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE45:%.*]]
27599 // CHECK32:       omp.body.continue45:
27600 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC46:%.*]]
27601 // CHECK32:       omp.inner.for.inc46:
27602 // CHECK32-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
27603 // CHECK32-NEXT:    [[ADD47:%.*]] = add i64 [[TMP32]], 1
27604 // CHECK32-NEXT:    store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group !10
27605 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]]
27606 // CHECK32:       omp.inner.for.end48:
27607 // CHECK32-NEXT:    store i64 400, i64* [[IT]], align 8
27608 // CHECK32-NEXT:    [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4
27609 // CHECK32-NEXT:    store i32 [[TMP33]], i32* [[LIN]], align 4
27610 // CHECK32-NEXT:    [[TMP34:%.*]] = load i32, i32* [[A28]], align 4
27611 // CHECK32-NEXT:    store i32 [[TMP34]], i32* [[A]], align 4
27612 // CHECK32-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
27613 // CHECK32-NEXT:    store i32 3, i32* [[DOTOMP_UB51]], align 4
27614 // CHECK32-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
27615 // CHECK32-NEXT:    store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4
27616 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND54:%.*]]
27617 // CHECK32:       omp.inner.for.cond54:
27618 // CHECK32-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
27619 // CHECK32-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !13
27620 // CHECK32-NEXT:    [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]]
27621 // CHECK32-NEXT:    br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
27622 // CHECK32:       omp.inner.for.body56:
27623 // CHECK32-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
27624 // CHECK32-NEXT:    [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4
27625 // CHECK32-NEXT:    [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
27626 // CHECK32-NEXT:    [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
27627 // CHECK32-NEXT:    store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group !13
27628 // CHECK32-NEXT:    [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !13
27629 // CHECK32-NEXT:    [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1
27630 // CHECK32-NEXT:    store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group !13
27631 // CHECK32-NEXT:    [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !13
27632 // CHECK32-NEXT:    [[CONV61:%.*]] = sext i16 [[TMP40]] to i32
27633 // CHECK32-NEXT:    [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
27634 // CHECK32-NEXT:    [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
27635 // CHECK32-NEXT:    store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group !13
27636 // CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE64:%.*]]
27637 // CHECK32:       omp.body.continue64:
27638 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC65:%.*]]
27639 // CHECK32:       omp.inner.for.inc65:
27640 // CHECK32-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
27641 // CHECK32-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1
27642 // CHECK32-NEXT:    store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group !13
27643 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]]
27644 // CHECK32:       omp.inner.for.end67:
27645 // CHECK32-NEXT:    store i16 22, i16* [[IT53]], align 2
27646 // CHECK32-NEXT:    [[TMP42:%.*]] = load i32, i32* [[A]], align 4
27647 // CHECK32-NEXT:    store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4
27648 // CHECK32-NEXT:    store i32 0, i32* [[DOTOMP_LB69]], align 4
27649 // CHECK32-NEXT:    store i32 25, i32* [[DOTOMP_UB70]], align 4
27650 // CHECK32-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
27651 // CHECK32-NEXT:    store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4
27652 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND73:%.*]]
27653 // CHECK32:       omp.inner.for.cond73:
27654 // CHECK32-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
27655 // CHECK32-NEXT:    [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group !16
27656 // CHECK32-NEXT:    [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]]
27657 // CHECK32-NEXT:    br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
27658 // CHECK32:       omp.inner.for.body75:
27659 // CHECK32-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
27660 // CHECK32-NEXT:    [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1
27661 // CHECK32-NEXT:    [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
27662 // CHECK32-NEXT:    [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
27663 // CHECK32-NEXT:    store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group !16
27664 // CHECK32-NEXT:    [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !16
27665 // CHECK32-NEXT:    [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1
27666 // CHECK32-NEXT:    store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group !16
27667 // CHECK32-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
27668 // CHECK32-NEXT:    [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !16
27669 // CHECK32-NEXT:    [[CONV80:%.*]] = fpext float [[TMP48]] to double
27670 // CHECK32-NEXT:    [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
27671 // CHECK32-NEXT:    [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
27672 // CHECK32-NEXT:    store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group !16
27673 // CHECK32-NEXT:    [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
27674 // CHECK32-NEXT:    [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
27675 // CHECK32-NEXT:    [[CONV84:%.*]] = fpext float [[TMP49]] to double
27676 // CHECK32-NEXT:    [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
27677 // CHECK32-NEXT:    [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
27678 // CHECK32-NEXT:    store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group !16
27679 // CHECK32-NEXT:    [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
27680 // CHECK32-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2
27681 // CHECK32-NEXT:    [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
27682 // CHECK32-NEXT:    [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00
27683 // CHECK32-NEXT:    store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group !16
27684 // CHECK32-NEXT:    [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]]
27685 // CHECK32-NEXT:    [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]]
27686 // CHECK32-NEXT:    [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3
27687 // CHECK32-NEXT:    [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
27688 // CHECK32-NEXT:    [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00
27689 // CHECK32-NEXT:    store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group !16
27690 // CHECK32-NEXT:    [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
27691 // CHECK32-NEXT:    [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group !16
27692 // CHECK32-NEXT:    [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1
27693 // CHECK32-NEXT:    store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group !16
27694 // CHECK32-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
27695 // CHECK32-NEXT:    [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group !16
27696 // CHECK32-NEXT:    [[CONV94:%.*]] = sext i8 [[TMP54]] to i32
27697 // CHECK32-NEXT:    [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
27698 // CHECK32-NEXT:    [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
27699 // CHECK32-NEXT:    store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group !16
27700 // CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
27701 // CHECK32:       omp.body.continue97:
27702 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
27703 // CHECK32:       omp.inner.for.inc98:
27704 // CHECK32-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
27705 // CHECK32-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1
27706 // CHECK32-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group !16
27707 // CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]]
27708 // CHECK32:       omp.inner.for.end100:
27709 // CHECK32-NEXT:    store i8 96, i8* [[IT72]], align 1
27710 // CHECK32-NEXT:    [[TMP56:%.*]] = load i32, i32* [[A]], align 4
27711 // CHECK32-NEXT:    [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
27712 // CHECK32-NEXT:    call void @llvm.stackrestore(i8* [[TMP57]])
27713 // CHECK32-NEXT:    ret i32 [[TMP56]]
27714 //
27715 //
27716 // CHECK32-LABEL: define {{[^@]+}}@_Z3bari
27717 // CHECK32-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
27718 // CHECK32-NEXT:  entry:
27719 // CHECK32-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
27720 // CHECK32-NEXT:    [[A:%.*]] = alloca i32, align 4
27721 // CHECK32-NEXT:    [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
27722 // CHECK32-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
27723 // CHECK32-NEXT:    store i32 0, i32* [[A]], align 4
27724 // CHECK32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
27725 // CHECK32-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
27726 // CHECK32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 4
27727 // CHECK32-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
27728 // CHECK32-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
27729 // CHECK32-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
27730 // CHECK32-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]])
27731 // CHECK32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[A]], align 4
27732 // CHECK32-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
27733 // CHECK32-NEXT:    store i32 [[ADD2]], i32* [[A]], align 4
27734 // CHECK32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
27735 // CHECK32-NEXT:    [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
27736 // CHECK32-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A]], align 4
27737 // CHECK32-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
27738 // CHECK32-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4
27739 // CHECK32-NEXT:    [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
27740 // CHECK32-NEXT:    [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
27741 // CHECK32-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4
27742 // CHECK32-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
27743 // CHECK32-NEXT:    store i32 [[ADD6]], i32* [[A]], align 4
27744 // CHECK32-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
27745 // CHECK32-NEXT:    ret i32 [[TMP8]]
27746 //
27747 //
// CHECK32-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK32-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK32-NEXT:  entry:
// CHECK32-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK32-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK32-NEXT:    [[B:%.*]] = alloca i32, align 4
// CHECK32-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK32-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK32-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK32-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK32-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK32-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK32-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK32-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK32-NEXT:    store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK32-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK32-NEXT:    [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK32-NEXT:    store i32 [[ADD]], i32* [[B]], align 4
// CHECK32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK32-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK32-NEXT:    [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK32-NEXT:    [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK32-NEXT:    store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK32-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60
// CHECK32-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
// CHECK32-NEXT:    store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK32-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK32-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK32-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK32-NEXT:    store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK32-NEXT:    [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK32-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1
// CHECK32-NEXT:    br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK32:       omp_if.then:
// CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK32:       omp.inner.for.cond:
// CHECK32-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK32-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !19
// CHECK32-NEXT:    [[CMP2:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK32-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK32:       omp.inner.for.body:
// CHECK32-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK32-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK32-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK32-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group !19
// CHECK32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group !19
// CHECK32-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP10]] to double
// CHECK32-NEXT:    [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK32-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK32-NEXT:    store double [[ADD3]], double* [[A]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK32-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK32-NEXT:    [[TMP11:%.*]] = load double, double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK32-NEXT:    [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00
// CHECK32-NEXT:    store double [[INC]], double* [[A4]], align 4, !nontemporal !20, !llvm.access.group !19
// CHECK32-NEXT:    [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK32-NEXT:    [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK32-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
// CHECK32-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK32-NEXT:    store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group !19
// CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK32:       omp.body.continue:
// CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK32:       omp.inner.for.inc:
// CHECK32-NEXT:    [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK32-NEXT:    [[ADD7:%.*]] = add i64 [[TMP13]], 1
// CHECK32-NEXT:    store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !19
// CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
// CHECK32:       omp.inner.for.end:
// CHECK32-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK32:       omp_if.else:
// CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND8:%.*]]
// CHECK32:       omp.inner.for.cond8:
// CHECK32-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK32-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK32-NEXT:    [[CMP9:%.*]] = icmp ule i64 [[TMP14]], [[TMP15]]
// CHECK32-NEXT:    br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]]
// CHECK32:       omp.inner.for.body10:
// CHECK32-NEXT:    [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK32-NEXT:    [[MUL11:%.*]] = mul i64 [[TMP16]], 400
// CHECK32-NEXT:    [[SUB12:%.*]] = sub i64 2000, [[MUL11]]
// CHECK32-NEXT:    store i64 [[SUB12]], i64* [[IT]], align 8
// CHECK32-NEXT:    [[TMP17:%.*]] = load i32, i32* [[B]], align 4
// CHECK32-NEXT:    [[CONV13:%.*]] = sitofp i32 [[TMP17]] to double
// CHECK32-NEXT:    [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00
// CHECK32-NEXT:    [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK32-NEXT:    store double [[ADD14]], double* [[A15]], align 4
// CHECK32-NEXT:    [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK32-NEXT:    [[TMP18:%.*]] = load double, double* [[A16]], align 4
// CHECK32-NEXT:    [[INC17:%.*]] = fadd double [[TMP18]], 1.000000e+00
// CHECK32-NEXT:    store double [[INC17]], double* [[A16]], align 4
// CHECK32-NEXT:    [[CONV18:%.*]] = fptosi double [[INC17]] to i16
// CHECK32-NEXT:    [[TMP19:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK32-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP19]]
// CHECK32-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i32 1
// CHECK32-NEXT:    store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2
// CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE21:%.*]]
// CHECK32:       omp.body.continue21:
// CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC22:%.*]]
// CHECK32:       omp.inner.for.inc22:
// CHECK32-NEXT:    [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK32-NEXT:    [[ADD23:%.*]] = add i64 [[TMP20]], 1
// CHECK32-NEXT:    store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8
// CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK32:       omp.inner.for.end24:
// CHECK32-NEXT:    br label [[OMP_IF_END]]
// CHECK32:       omp_if.end:
// CHECK32-NEXT:    store i64 400, i64* [[IT]], align 8
// CHECK32-NEXT:    [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK32-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP21]]
// CHECK32-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1
// CHECK32-NEXT:    [[TMP22:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
// CHECK32-NEXT:    [[CONV27:%.*]] = sext i16 [[TMP22]] to i32
// CHECK32-NEXT:    [[TMP23:%.*]] = load i32, i32* [[B]], align 4
// CHECK32-NEXT:    [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP23]]
// CHECK32-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK32-NEXT:    call void @llvm.stackrestore(i8* [[TMP24]])
// CHECK32-NEXT:    ret i32 [[ADD28]]
//
//
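// NOTE: @_ZL7fstatici below is the internal-linkage fstatic(int). Only the
// locals and the precomputed bounds survive in the IR: .omp.ub = 429496720
// is consistent with (for example, an assumption) an unsigned loop like
// for (unsigned i = 100; i < 10; i += 10), where the wrapped distance
// (10 - 100) mod 2^32 = 4294967206 yields floor(4294967205 / 10) + 1 =
// 429496721 logical iterations, hence ub = trips - 1 = 429496720, while the
// loop precondition (100 < 10) is statically false, so no loop body is
// emitted and the function simply returns a == 0:
//
//   static int fstatic(int n) {
//     int a = 0; short aa = 0; char aaa = 0; int b[10];
//     // simd-style loop whose body folds away (precondition is false):
//     for (unsigned i = 100; i < 10; i += 10) { /* never runs */ }
//     return a;
//   }
//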
// CHECK32-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK32-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK32-NEXT:  entry:
// CHECK32-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK32-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK32-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK32-NEXT:    [[AAA:%.*]] = alloca i8, align 1
// CHECK32-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK32-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK32-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK32-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK32-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK32-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK32-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK32-NEXT:    store i8 0, i8* [[AAA]], align 1
// CHECK32-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK32-NEXT:    store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT:    ret i32 [[TMP0]]
//
//
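// NOTE: @_Z9ftemplateIiET_i below is ftemplate<int>(int). A hedged sketch of
// what the IR implies (names and directive are assumptions): seven trips
// (.omp.ub = 6), i = -10 + 3*iv, and the final store of i = 11 matches the
// first value failing i < 10 after the last step from 8:
//
//   template <typename T> T ftemplate(T n) {
//     T a = 0;
//     short aa = 0;
//     T b[10];
//     // simd-style loop over a signed 64-bit induction variable:
//     for (long long i = -10; i < 10; i += 3) {
//       a += 1;    // add nsw i32 ..., 1
//       aa += 1;   // widened to i32 via sext, truncated back to i16
//       b[2] += 1; // the [10 x i32] element at index 2
//     }
//     return a;
//   }
//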
// CHECK32-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK32-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK32-NEXT:  entry:
// CHECK32-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK32-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK32-NEXT:    [[AA:%.*]] = alloca i16, align 2
// CHECK32-NEXT:    [[B:%.*]] = alloca [10 x i32], align 4
// CHECK32-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK32-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK32-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK32-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK32-NEXT:    [[I:%.*]] = alloca i64, align 8
// CHECK32-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK32-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK32-NEXT:    store i16 0, i16* [[AA]], align 2
// CHECK32-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK32-NEXT:    store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK32-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK32-NEXT:    store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK32:       omp.inner.for.cond:
// CHECK32-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK32-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !25
// CHECK32-NEXT:    [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK32-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK32:       omp.inner.for.body:
// CHECK32-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK32-NEXT:    [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK32-NEXT:    [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK32-NEXT:    store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group !25
// CHECK32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !25
// CHECK32-NEXT:    [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK32-NEXT:    store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group !25
// CHECK32-NEXT:    [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group !25
// CHECK32-NEXT:    [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK32-NEXT:    [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK32-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK32-NEXT:    store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group !25
// CHECK32-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK32-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK32-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK32-NEXT:    store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK32-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK32:       omp.body.continue:
// CHECK32-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK32:       omp.inner.for.inc:
// CHECK32-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK32-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK32-NEXT:    store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group !25
// CHECK32-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
// CHECK32:       omp.inner.for.end:
// CHECK32-NEXT:    store i64 11, i64* [[I]], align 8
// CHECK32-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK32-NEXT:    ret i32 [[TMP8]]
//
