1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // Test host code gen
3 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
4 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
5 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
6 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
7 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
8 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
9 
10 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
11 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
12 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK5
13 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7
14 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
15 // RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK7
16 
17 // RUN: %clang_cc1 -no-opaque-pointers  -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
18 // RUN: %clang_cc1 -no-opaque-pointers  -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
19 // RUN: %clang_cc1 -no-opaque-pointers  -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9
20 // RUN: %clang_cc1 -no-opaque-pointers  -verify -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
21 // RUN: %clang_cc1 -no-opaque-pointers  -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
22 // RUN: %clang_cc1 -no-opaque-pointers  -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
23 
24 // RUN: %clang_cc1 -no-opaque-pointers  -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK13
25 // RUN: %clang_cc1 -no-opaque-pointers  -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
26 // RUN: %clang_cc1 -no-opaque-pointers  -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK13
27 // RUN: %clang_cc1 -no-opaque-pointers  -verify -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK15
28 // RUN: %clang_cc1 -no-opaque-pointers  -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
29 // RUN: %clang_cc1 -no-opaque-pointers  -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK15
30 // expected-no-diagnostics
31 #ifndef HEADER
32 #define HEADER
33 
34 
35 template <typename T>
36 T tmain() {
37   T *a, *b, *c;
38   int n = 10000;
39   int ch = 100;
40 
41   // no schedule clauses
42   #pragma omp target
43   #pragma omp teams
44   #pragma omp distribute parallel for simd
45   for (int i = 0; i < n; ++i) {
46     a[i] = b[i] + c[i];
47   }
48 
49   // dist_schedule: static no chunk
50   #pragma omp target
51   #pragma omp teams
52   #pragma omp distribute parallel for simd dist_schedule(static)
53   for (int i = 0; i < n; ++i) {
54     a[i] = b[i] + c[i];
55   }
56 
57   // dist_schedule: static chunk
58   #pragma omp target
59   #pragma omp teams
60   #pragma omp distribute parallel for simd dist_schedule(static, ch)
61   for (int i = 0; i < n; ++i) {
62     a[i] = b[i] + c[i];
63   }
64 
65   // schedule: static no chunk
66   #pragma omp target
67   #pragma omp teams
68   #pragma omp distribute parallel for simd schedule(static)
69   for (int i = 0; i < n; ++i) {
70     a[i] = b[i] + c[i];
71   }
72 
73   // schedule: static chunk
74   #pragma omp target
75   #pragma omp teams
76   #pragma omp distribute parallel for simd schedule(static, ch)
77   for (int i = 0; i < n; ++i) {
78     a[i] = b[i] + c[i];
79   }
80 
81   // schedule: dynamic no chunk
82   #pragma omp target
83   #pragma omp teams
84   #pragma omp distribute parallel for simd schedule(dynamic)
85   for (int i = 0; i < n; ++i) {
86     a[i] = b[i] + c[i];
87   }
88 
89   // schedule: dynamic chunk
90   #pragma omp target
91   #pragma omp teams
92   #pragma omp distribute parallel for simd schedule(dynamic, ch)
93   for (int i = 0; i < n; ++i) {
94     a[i] = b[i] + c[i];
95   }
96 
97   return T();
98 }
99 
100 int main() {
101   double *a, *b, *c;
102   int n = 10000;
103   int ch = 100;
104 
105 #ifdef LAMBDA
106   [&]() {
107 
108 
109 
110 
111 
112 
113 
114 
115     // no schedule clauses
116     #pragma omp target
117     #pragma omp teams
118 
119     #pragma omp distribute parallel for simd
120     for (int i = 0; i < n; ++i) {
121       a[i] = b[i] + c[i];
122 
123 
124       // check EUB for distribute
125 
126       // initialize omp.iv
127 
128       // check exit condition
129 
130       // check that PrevLB and PrevUB are passed to the 'for'
131       // check that distlb and distub are properly passed to fork_call
132 
133       // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
134 
135 
136       // implementation of 'parallel for'
137 
138 
139       // initialize lb and ub to PrevLB and PrevUB
140 
141       // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
142       // In this case we use EUB
143 
144       // initialize omp.iv
145 
146       // check exit condition
147 
148       // check that PrevLB and PrevUB are passed to the 'for'
149 
150       // check stride 1 for 'for' in 'distribute parallel for simd'
151 
152 
153       [&]() {
154 	a[i] = b[i] + c[i];
155       }();
156     }
157 
    // dist_schedule: static no chunk (same as default - no dist_schedule)
159     #pragma omp target
160     #pragma omp teams
161 
162     #pragma omp distribute parallel for simd dist_schedule(static)
163     for (int i = 0; i < n; ++i) {
164       a[i] = b[i] + c[i];
165 
166 
167       // check EUB for distribute
168 
169       // initialize omp.iv
170 
171       // check exit condition
172 
173       // check that PrevLB and PrevUB are passed to the 'for'
174       // check that distlb and distub are properly passed to fork_call
175 
176       // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
177 
178 
179       // implementation of 'parallel for'
180 
181 
182       // initialize lb and ub to PrevLB and PrevUB
183 
184       // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
185       // In this case we use EUB
186 
187       // initialize omp.iv
188 
189       // check exit condition
190 
191       // check that PrevLB and PrevUB are passed to the 'for'
192 
193       // check stride 1 for 'for' in 'distribute parallel for simd'
194 
195       [&]() {
196 	a[i] = b[i] + c[i];
197       }();
198     }
199 
200     // dist_schedule: static chunk
201     #pragma omp target
202     #pragma omp teams
203 
204     #pragma omp distribute parallel for simd dist_schedule(static, ch)
205     for (int i = 0; i < n; ++i) {
206       a[i] = b[i] + c[i];
207 
208 
209       // check EUB for distribute
210 
211       // initialize omp.iv
212 
213       // check exit condition
214 
215       // check that PrevLB and PrevUB are passed to the 'for'
216       // check that distlb and distub are properly passed to fork_call
217 
218       // check DistInc
219 
220       // Update UB
221 
222       // Store LB in IV
223 
224 
225       // loop exit
226 
227       // skip implementation of 'parallel for': using default scheduling and was tested above
228       [&]() {
229 	a[i] = b[i] + c[i];
230       }();
231     }
232 
233     // schedule: static no chunk
234     #pragma omp target
235     #pragma omp teams
236 
237     #pragma omp distribute parallel for simd schedule(static)
238     for (int i = 0; i < n; ++i) {
239       a[i] = b[i] + c[i];
240 
241       // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
242 
      // 'parallel for' implementation is the same as the case without schedule clause (static no chunk is the default)
244 
245 
246       // initialize lb and ub to PrevLB and PrevUB
247 
248       // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
249       // In this case we use EUB
250 
251       // initialize omp.iv
252 
253       // check exit condition
254 
255       // check that PrevLB and PrevUB are passed to the 'for'
256 
257       // check stride 1 for 'for' in 'distribute parallel for simd'
258 
259 
260       [&]() {
261 	a[i] = b[i] + c[i];
262       }();
263     }
264 
265     // schedule: static chunk
266     #pragma omp target
267     #pragma omp teams
268 
269     #pragma omp distribute parallel for simd schedule(static, ch)
270     for (int i = 0; i < n; ++i) {
271       a[i] = b[i] + c[i];
272       // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
273 
274       // 'parallel for' implementation using outer and inner loops and PrevEUB
275 
276       // initialize lb and ub to PrevLB and PrevUB
277 
278       // check PrevEUB (using PrevUB instead of NumIt as upper bound)
279 
280       // initialize omp.iv (IV = LB)
281 
282       // outer loop: while (IV < UB) {
283 
284 
285 
286       // skip body branch
287 
288       // IV = IV + 1 and inner loop latch
289 
290       // check NextLB and NextUB
291 
292 
293       [&]() {
294 	a[i] = b[i] + c[i];
295       }();
296     }
297 
298     // schedule: dynamic no chunk
299     #pragma omp target
300     #pragma omp teams
301 
302     #pragma omp distribute parallel for simd schedule(dynamic)
303     for (int i = 0; i < n; ++i) {
304       a[i] = b[i] + c[i];
305       // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
306 
307       // 'parallel for' implementation using outer and inner loops and PrevEUB
308 
309       // initialize lb and ub to PrevLB and PrevUB
310 
311 
312       // initialize omp.iv (IV = LB)
313 
314 
315       // skip body branch
316 
317       // IV = IV + 1 and inner loop latch
318 
319       // check NextLB and NextUB
320 
321 
322       [&]() {
323 	a[i] = b[i] + c[i];
324       }();
325     }
326 
327     // schedule: dynamic chunk
328     #pragma omp target
329     #pragma omp teams
330 
331     #pragma omp distribute parallel for simd schedule(dynamic, ch)
332     for (int i = 0; i < n; ++i) {
333       a[i] = b[i] + c[i];
334       // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
335 
336       // 'parallel for' implementation using outer and inner loops and PrevEUB
337 
338       // initialize lb and ub to PrevLB and PrevUB
339 
340 
341       // initialize omp.iv (IV = LB)
342 
343 
344       // skip body branch
345 
346       // IV = IV + 1 and inner loop latch
347 
348       // check NextLB and NextUB
349 
350 
351       [&]() {
352 	a[i] = b[i] + c[i];
353       }();
354     }
355   }();
356   return 0;
357 #else
358 
359 
360 
361 
362 
363 
364 
365 
366 
367   // no schedule clauses
368   #pragma omp target
369   #pragma omp teams
370 
371   #pragma omp distribute parallel for simd
372   for (int i = 0; i < n; ++i) {
373     a[i] = b[i] + c[i];
374 
375 
376     // check EUB for distribute
377 
378     // initialize omp.iv
379 
380     // check exit condition
381 
382     // check that PrevLB and PrevUB are passed to the 'for'
383     // check that distlb and distub are properly passed to fork_call
384 
385     // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
386 
387 
388     // implementation of 'parallel for'
389 
390 
391     // initialize lb and ub to PrevLB and PrevUB
392 
393     // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
394     // In this case we use EUB
395 
396     // initialize omp.iv
397 
398     // check exit condition
399 
400     // check that PrevLB and PrevUB are passed to the 'for'
401 
402     // check stride 1 for 'for' in 'distribute parallel for simd'
403 
404   }
405 
406   // dist_schedule: static no chunk
407   #pragma omp target
408   #pragma omp teams
409 
410   #pragma omp distribute parallel for simd dist_schedule(static)
411   for (int i = 0; i < n; ++i) {
412     a[i] = b[i] + c[i];
413 
414 
415     // check EUB for distribute
416 
417     // initialize omp.iv
418 
419     // check exit condition
420 
421     // check that PrevLB and PrevUB are passed to the 'for'
422     // check that distlb and distub are properly passed to fork_call
423 
424     // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
425 
426 
427     // implementation of 'parallel for'
428 
429 
430     // initialize lb and ub to PrevLB and PrevUB
431 
432     // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
433     // In this case we use EUB
434 
435     // initialize omp.iv
436 
437     // check exit condition
438 
439     // check that PrevLB and PrevUB are passed to the 'for'
440 
441     // check stride 1 for 'for' in 'distribute parallel for simd'
442 
443   }
444 
445   // dist_schedule: static chunk
446   #pragma omp target
447   #pragma omp teams
448 
449   #pragma omp distribute parallel for simd dist_schedule(static, ch)
450   for (int i = 0; i < n; ++i) {
451     a[i] = b[i] + c[i];
452 
    // unlike the previous tests, in this one we have an outer and an inner loop for 'distribute'
454 
455     // check EUB for distribute
456 
457     // initialize omp.iv
458 
459     // check exit condition
460 
461     // check that PrevLB and PrevUB are passed to the 'for'
462     // check that distlb and distub are properly passed to fork_call
463 
464     // check DistInc
465 
466     // Update UB
467 
468     // Store LB in IV
469 
470 
471     // loop exit
472 
473     // skip implementation of 'parallel for': using default scheduling and was tested above
474   }
475 
476   // schedule: static no chunk
477   #pragma omp target
478   #pragma omp teams
479 
480   #pragma omp distribute parallel for simd schedule(static)
481   for (int i = 0; i < n; ++i) {
482     a[i] = b[i] + c[i];
483 
484     // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
485 
    // 'parallel for' implementation is the same as the case without schedule clause (static no chunk is the default)
487 
488 
489     // initialize lb and ub to PrevLB and PrevUB
490 
491     // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
492     // In this case we use EUB
493 
494     // initialize omp.iv
495 
496     // check exit condition
497 
498     // check that PrevLB and PrevUB are passed to the 'for'
499 
500     // check stride 1 for 'for' in 'distribute parallel for simd'
501 
502   }
503 
504   // schedule: static chunk
505   #pragma omp target
506   #pragma omp teams
507 
508   #pragma omp distribute parallel for simd schedule(static, ch)
509   for (int i = 0; i < n; ++i) {
510     a[i] = b[i] + c[i];
511     // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
512 
513     // 'parallel for' implementation using outer and inner loops and PrevEUB
514 
515     // initialize lb and ub to PrevLB and PrevUB
516 
517     // check PrevEUB (using PrevUB instead of NumIt as upper bound)
518 
519     // initialize omp.iv (IV = LB)
520 
521     // outer loop: while (IV < UB) {
522 
523 
524 
525     // skip body branch
526 
527     // IV = IV + 1 and inner loop latch
528 
529     // check NextLB and NextUB
530 
531 
532   }
533 
534   // schedule: dynamic no chunk
535   #pragma omp target
536   #pragma omp teams
537 
538   #pragma omp distribute parallel for simd schedule(dynamic)
539   for (int i = 0; i < n; ++i) {
540     a[i] = b[i] + c[i];
541     // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
542 
543     // 'parallel for' implementation using outer and inner loops and PrevEUB
544 
545     // initialize lb and ub to PrevLB and PrevUB
546 
547 
548     // initialize omp.iv (IV = LB)
549 
550 
551     // skip body branch
552 
553     // IV = IV + 1 and inner loop latch
554 
555     // check NextLB and NextUB
556 
557 
558   }
559 
560   // schedule: dynamic chunk
561   #pragma omp target
562   #pragma omp teams
563 
564   #pragma omp distribute parallel for simd schedule(dynamic, ch)
565   for (int i = 0; i < n; ++i) {
566     a[i] = b[i] + c[i];
567     // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
568 
569     // 'parallel for' implementation using outer and inner loops and PrevEUB
570 
571     // initialize lb and ub to PrevLB and PrevUB
572 
573 
574     // initialize omp.iv (IV = LB)
575 
576 
577     // skip body branch
578 
579     // IV = IV + 1 and inner loop latch
580 
581     // check NextLB and NextUB
582 
583 
584   }
585 
586   return tmain<int>();
587 #endif
588 }
589 
590 // check code
591 
592 
593 
594 
595 
596 
597 
598 
599 
600 
601 
602 // check EUB for distribute
603 
604 // initialize omp.iv
605 
606 // check exit condition
607 
608 // check that PrevLB and PrevUB are passed to the 'for'
609 // check that distlb and distub are properly passed to fork_call
610 
611 // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
612 
613 
614 // implementation of 'parallel for'
615 
616 
617 // initialize lb and ub to PrevLB and PrevUB
618 
619 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
620 // In this case we use EUB
621 
622 // initialize omp.iv
623 
624 // check exit condition
625 
626 // check that PrevLB and PrevUB are passed to the 'for'
627 
628 // check stride 1 for 'for' in 'distribute parallel for simd'
629 
630 
631 
632 
633 
634 // check EUB for distribute
635 
636 // initialize omp.iv
637 
638 // check exit condition
639 
640 // check that PrevLB and PrevUB are passed to the 'for'
641 // check that distlb and distub are properly passed to fork_call
642 
643 // increment by stride (distInc - 'parallel for' executes the whole chunk) and latch
644 
645 
646 // implementation of 'parallel for'
647 
648 
649 // initialize lb and ub to PrevLB and PrevUB
650 
651 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
652 // In this case we use EUB
653 
654 // initialize omp.iv
655 
656 // check exit condition
657 
658 // check that PrevLB and PrevUB are passed to the 'for'
659 
660 // check stride 1 for 'for' in 'distribute parallel for simd'
661 
662 
663 
664 
// unlike the previous tests, in this one we have an outer and an inner loop for 'distribute'
666 
667 // check EUB for distribute
668 
669 // initialize omp.iv
670 
671 // check exit condition
672 
673 // check that PrevLB and PrevUB are passed to the 'for'
674 // check that distlb and distub are properly passed to fork_call
675 
676 // check DistInc
677 
678 // Update UB
679 
680 // Store LB in IV
681 
682 
683 // loop exit
684 
685 // skip implementation of 'parallel for': using default scheduling and was tested above
686 
687 
688 
689 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
690 
// 'parallel for' implementation is the same as the case without schedule clause (static no chunk is the default)
692 
693 
694 // initialize lb and ub to PrevLB and PrevUB
695 
696 // PrevEUB is only used when 'for' has a chunked schedule, otherwise EUB is used
697 // In this case we use EUB
698 
699 // initialize omp.iv
700 
701 // check exit condition
702 
703 // check that PrevLB and PrevUB are passed to the 'for'
704 
705 // check stride 1 for 'for' in 'distribute parallel for simd'
706 
707 
708 
709 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
710 
711 // 'parallel for' implementation using outer and inner loops and PrevEUB
712 
713 // initialize lb and ub to PrevLB and PrevUB
714 
715 // check PrevEUB (using PrevUB instead of NumIt as upper bound)
716 
717 // initialize omp.iv (IV = LB)
718 
719 // outer loop: while (IV < UB) {
720 
721 
722 
723 // skip body branch
724 
725 // IV = IV + 1 and inner loop latch
726 
727 // check NextLB and NextUB
728 
729 
730 
731 
732 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
733 
734 // 'parallel for' implementation using outer and inner loops and PrevEUB
735 
736 // initialize lb and ub to PrevLB and PrevUB
737 
738 
739 // initialize omp.iv (IV = LB)
740 
741 
742 // skip body branch
743 
744 // IV = IV + 1 and inner loop latch
745 
746 // check NextLB and NextUB
747 
748 
749 
750 
751 // skip rest of implementation of 'distribute' as it is tested above for default dist_schedule case
752 
753 // 'parallel for' implementation using outer and inner loops and PrevEUB
754 
755 // initialize lb and ub to PrevLB and PrevUB
756 
757 
758 // initialize omp.iv (IV = LB)
759 
760 
761 // skip body branch
762 
763 // IV = IV + 1 and inner loop latch
764 
765 // check NextLB and NextUB
766 
767 
768 
769 #endif
770 // CHECK1-LABEL: define {{[^@]+}}@main
771 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
772 // CHECK1-NEXT:  entry:
773 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
774 // CHECK1-NEXT:    [[A:%.*]] = alloca double*, align 8
775 // CHECK1-NEXT:    [[B:%.*]] = alloca double*, align 8
776 // CHECK1-NEXT:    [[C:%.*]] = alloca double*, align 8
777 // CHECK1-NEXT:    [[N:%.*]] = alloca i32, align 4
778 // CHECK1-NEXT:    [[CH:%.*]] = alloca i32, align 4
779 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
780 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
781 // CHECK1-NEXT:    store i32 10000, i32* [[N]], align 4
782 // CHECK1-NEXT:    store i32 100, i32* [[CH]], align 4
783 // CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
784 // CHECK1-NEXT:    store i32* [[N]], i32** [[TMP0]], align 8
785 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
786 // CHECK1-NEXT:    store double** [[A]], double*** [[TMP1]], align 8
787 // CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
788 // CHECK1-NEXT:    store double** [[B]], double*** [[TMP2]], align 8
789 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
790 // CHECK1-NEXT:    store double** [[C]], double*** [[TMP3]], align 8
791 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
792 // CHECK1-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 8
793 // CHECK1-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 8 dereferenceable(40) [[REF_TMP]])
794 // CHECK1-NEXT:    ret i32 0
795 //
796 //
797 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116
798 // CHECK1-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2:[0-9]+]] {
799 // CHECK1-NEXT:  entry:
800 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
801 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
802 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
803 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
804 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
805 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
806 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
807 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
808 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
809 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
810 // CHECK1-NEXT:    ret void
811 //
812 //
813 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
814 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
815 // CHECK1-NEXT:  entry:
816 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
817 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
818 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
819 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
820 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
821 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
822 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
823 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
824 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
825 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
826 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
827 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
828 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
829 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
830 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
831 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
832 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
833 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
834 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
835 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
836 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
837 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
838 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
839 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
840 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
841 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
842 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
843 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
844 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
845 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
846 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
847 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
848 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
849 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
850 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
851 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
852 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
853 // CHECK1:       omp.precond.then:
854 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
855 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
856 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
857 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
858 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
859 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
860 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
861 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
862 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
863 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
864 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
865 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
866 // CHECK1:       cond.true:
867 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
868 // CHECK1-NEXT:    br label [[COND_END:%.*]]
869 // CHECK1:       cond.false:
870 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
871 // CHECK1-NEXT:    br label [[COND_END]]
872 // CHECK1:       cond.end:
873 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
874 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
875 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
876 // CHECK1-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
877 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
878 // CHECK1:       omp.inner.for.cond:
879 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
880 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !10
881 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
882 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
883 // CHECK1:       omp.inner.for.body:
884 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !10
885 // CHECK1-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
886 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !10
887 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
888 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !10
889 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
890 // CHECK1:       omp.inner.for.inc:
891 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
892 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !10
893 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
894 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
895 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
896 // CHECK1:       omp.inner.for.end:
897 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
898 // CHECK1:       omp.loop.exit:
899 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
900 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
901 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
902 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
903 // CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
904 // CHECK1-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
905 // CHECK1:       .omp.final.then:
906 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
907 // CHECK1-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
908 // CHECK1-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
909 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
910 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
911 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
912 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
913 // CHECK1:       .omp.final.done:
914 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
915 // CHECK1:       omp.precond.end:
916 // CHECK1-NEXT:    ret void
917 //
918 //
919 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
920 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
921 // CHECK1-NEXT:  entry:
922 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
923 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
924 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
925 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
926 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
927 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
928 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
929 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
930 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
931 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
932 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
933 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
934 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
935 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
936 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
937 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
938 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
939 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
940 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
941 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
942 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
943 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
944 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
945 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
946 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
947 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
948 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
949 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
950 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
951 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
952 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
953 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
954 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
955 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
956 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
957 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
958 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
959 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
960 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
961 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
962 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
963 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
964 // CHECK1:       omp.precond.then:
965 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
966 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
967 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
968 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
969 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
970 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
971 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
972 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
973 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
974 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
975 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
976 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
977 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
978 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
979 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
980 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
981 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
982 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
983 // CHECK1:       cond.true:
984 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
985 // CHECK1-NEXT:    br label [[COND_END:%.*]]
986 // CHECK1:       cond.false:
987 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
988 // CHECK1-NEXT:    br label [[COND_END]]
989 // CHECK1:       cond.end:
990 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
991 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
992 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
993 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
994 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
995 // CHECK1:       omp.inner.for.cond:
996 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
997 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
998 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
999 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1000 // CHECK1:       omp.inner.for.body:
1001 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
1002 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1003 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1004 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !14
1005 // CHECK1-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !14
1006 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
1007 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1008 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1009 // CHECK1-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !14
1010 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !14
1011 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
1012 // CHECK1-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1013 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1014 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !14
1015 // CHECK1-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1016 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !14
1017 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !14
1018 // CHECK1-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1019 // CHECK1-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1020 // CHECK1-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !14
1021 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
1022 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !14
1023 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
1024 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !14
1025 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
1026 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !14
1027 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
1028 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !14
1029 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !14
1030 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1031 // CHECK1:       omp.body.continue:
1032 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1033 // CHECK1:       omp.inner.for.inc:
1034 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
1035 // CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1036 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
1037 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
1038 // CHECK1:       omp.inner.for.end:
1039 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1040 // CHECK1:       omp.loop.exit:
1041 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1042 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1043 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1044 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1045 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1046 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1047 // CHECK1:       .omp.final.then:
1048 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1049 // CHECK1-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1050 // CHECK1-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1051 // CHECK1-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1052 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1053 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
1054 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1055 // CHECK1:       .omp.final.done:
1056 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1057 // CHECK1:       omp.precond.end:
1058 // CHECK1-NEXT:    ret void
1059 //
1060 //
1061 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
1062 // CHECK1-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
1063 // CHECK1-NEXT:  entry:
1064 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
1065 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
1066 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
1067 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
1068 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
1069 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
1070 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
1071 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
1072 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1073 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1074 // CHECK1-NEXT:    ret void
1075 //
1076 //
1077 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
1078 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1079 // CHECK1-NEXT:  entry:
1080 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1081 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1082 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1083 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1084 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1085 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1086 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1087 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1088 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1089 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1090 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1091 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1092 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1093 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1094 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1095 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
1096 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1097 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1098 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1099 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1100 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1101 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1102 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1103 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1104 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1105 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1106 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1107 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1108 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1109 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1110 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1111 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1112 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1113 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1114 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1115 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1116 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1117 // CHECK1:       omp.precond.then:
1118 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1119 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1120 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
1121 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1122 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1123 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1124 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
1125 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1126 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1127 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1128 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
1129 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1130 // CHECK1:       cond.true:
1131 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1132 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1133 // CHECK1:       cond.false:
1134 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1135 // CHECK1-NEXT:    br label [[COND_END]]
1136 // CHECK1:       cond.end:
1137 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
1138 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1139 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1140 // CHECK1-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
1141 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1142 // CHECK1:       omp.inner.for.cond:
1143 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
1144 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !19
1145 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
1146 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1147 // CHECK1:       omp.inner.for.body:
1148 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !19
1149 // CHECK1-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
1150 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !19
1151 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1152 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !19
1153 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1154 // CHECK1:       omp.inner.for.inc:
1155 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
1156 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !19
1157 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
1158 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
1159 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
1160 // CHECK1:       omp.inner.for.end:
1161 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1162 // CHECK1:       omp.loop.exit:
1163 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1164 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
1165 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
1166 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1167 // CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
1168 // CHECK1-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1169 // CHECK1:       .omp.final.then:
1170 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1171 // CHECK1-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
1172 // CHECK1-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
1173 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
1174 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
1175 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
1176 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1177 // CHECK1:       .omp.final.done:
1178 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1179 // CHECK1:       omp.precond.end:
1180 // CHECK1-NEXT:    ret void
1181 //
1182 //
1183 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
1184 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1185 // CHECK1-NEXT:  entry:
1186 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1187 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1188 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1189 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1190 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1191 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1192 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1193 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1194 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1195 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1196 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1197 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1198 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1199 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1200 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1201 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1202 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1203 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
1204 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 8
1205 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1206 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1207 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1208 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1209 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1210 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1211 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1212 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1213 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1214 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1215 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1216 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1217 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1218 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1219 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1220 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1221 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1222 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1223 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1224 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1225 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1226 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1227 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1228 // CHECK1:       omp.precond.then:
1229 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1230 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1231 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
1232 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1233 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
1234 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1235 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
1236 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1237 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
1238 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1239 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1240 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1241 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1242 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1243 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1244 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1245 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1246 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1247 // CHECK1:       cond.true:
1248 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1249 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1250 // CHECK1:       cond.false:
1251 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1252 // CHECK1-NEXT:    br label [[COND_END]]
1253 // CHECK1:       cond.end:
1254 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1255 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1256 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1257 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1258 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1259 // CHECK1:       omp.inner.for.cond:
1260 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1261 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
1262 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1263 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1264 // CHECK1:       omp.inner.for.body:
1265 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1266 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1267 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1268 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !22
1269 // CHECK1-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !22
1270 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
1271 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1272 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1273 // CHECK1-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !22
1274 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !22
1275 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
1276 // CHECK1-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1277 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1278 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !22
1279 // CHECK1-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1280 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !22
1281 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !22
1282 // CHECK1-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1283 // CHECK1-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1284 // CHECK1-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !22
1285 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0
1286 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !22
1287 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1
1288 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !22
1289 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 2
1290 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !22
1291 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 3
1292 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !22
1293 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE0_clEv"(%class.anon.1* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !22
1294 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1295 // CHECK1:       omp.body.continue:
1296 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1297 // CHECK1:       omp.inner.for.inc:
1298 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1299 // CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1300 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
1301 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
1302 // CHECK1:       omp.inner.for.end:
1303 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1304 // CHECK1:       omp.loop.exit:
1305 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1306 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1307 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1308 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1309 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1310 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1311 // CHECK1:       .omp.final.then:
1312 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1313 // CHECK1-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1314 // CHECK1-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1315 // CHECK1-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1316 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1317 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
1318 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1319 // CHECK1:       .omp.final.done:
1320 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1321 // CHECK1:       omp.precond.end:
1322 // CHECK1-NEXT:    ret void
1323 //
1324 //
1325 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l201
1326 // CHECK1-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
1327 // CHECK1-NEXT:  entry:
1328 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
1329 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
1330 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
1331 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
1332 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
1333 // CHECK1-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
1334 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
1335 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
1336 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
1337 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
1338 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
1339 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1340 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1341 // CHECK1-NEXT:    ret void
1342 //
1343 //
1344 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6
1345 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1346 // CHECK1-NEXT:  entry:
1347 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1348 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1349 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
1350 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1351 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1352 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1353 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1354 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1355 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1356 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1357 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1358 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1359 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1360 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1361 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1362 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1363 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
1364 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1365 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1366 // CHECK1-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
1367 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1368 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1369 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1370 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1371 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
1372 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1373 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
1374 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
1375 // CHECK1-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
1376 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
1377 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
1378 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1379 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
1380 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1381 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1382 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1383 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1384 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1385 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
1386 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1387 // CHECK1:       omp.precond.then:
1388 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1389 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1390 // CHECK1-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
1391 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1392 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1393 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
1394 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1395 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1396 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
1397 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1398 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1399 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1400 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1401 // CHECK1:       cond.true:
1402 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1403 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1404 // CHECK1:       cond.false:
1405 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1406 // CHECK1-NEXT:    br label [[COND_END]]
1407 // CHECK1:       cond.end:
1408 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1409 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1410 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1411 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1412 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1413 // CHECK1:       omp.inner.for.cond:
1414 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1415 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
1416 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
1417 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
1418 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1419 // CHECK1:       omp.inner.for.body:
1420 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1421 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1422 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1423 // CHECK1-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
1424 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !25
1425 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1426 // CHECK1:       omp.inner.for.inc:
1427 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1428 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
1429 // CHECK1-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
1430 // CHECK1-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1431 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1432 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
1433 // CHECK1-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
1434 // CHECK1-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1435 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1436 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !25
1437 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
1438 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1439 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1440 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
1441 // CHECK1-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
1442 // CHECK1-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
1443 // CHECK1:       cond.true10:
1444 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !25
1445 // CHECK1-NEXT:    br label [[COND_END12:%.*]]
1446 // CHECK1:       cond.false11:
1447 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1448 // CHECK1-NEXT:    br label [[COND_END12]]
1449 // CHECK1:       cond.end12:
1450 // CHECK1-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
1451 // CHECK1-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !25
1452 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !25
1453 // CHECK1-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
1454 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
1455 // CHECK1:       omp.inner.for.end:
1456 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1457 // CHECK1:       omp.loop.exit:
1458 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1459 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
1460 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
1461 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1462 // CHECK1-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
1463 // CHECK1-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1464 // CHECK1:       .omp.final.then:
1465 // CHECK1-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1466 // CHECK1-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
1467 // CHECK1-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
1468 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
1469 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
1470 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
1471 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1472 // CHECK1:       .omp.final.done:
1473 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1474 // CHECK1:       omp.precond.end:
1475 // CHECK1-NEXT:    ret void
1476 //
1477 //
1478 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
1479 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1480 // CHECK1-NEXT:  entry:
1481 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1482 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1483 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1484 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1485 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1486 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1487 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1488 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1489 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1490 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1491 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1492 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1493 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1494 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1495 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1496 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1497 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1498 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
1499 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_2:%.*]], align 8
1500 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1501 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1502 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1503 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1504 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1505 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1506 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1507 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1508 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1509 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1510 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1511 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1512 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1513 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1514 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1515 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1516 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1517 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1518 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1519 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1520 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1521 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1522 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1523 // CHECK1:       omp.precond.then:
1524 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1525 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1526 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
1527 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1528 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
1529 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1530 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
1531 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1532 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
1533 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1534 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1535 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1536 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1537 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1538 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1539 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1540 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1541 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1542 // CHECK1:       cond.true:
1543 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1544 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1545 // CHECK1:       cond.false:
1546 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1547 // CHECK1-NEXT:    br label [[COND_END]]
1548 // CHECK1:       cond.end:
1549 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1550 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1551 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1552 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1553 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1554 // CHECK1:       omp.inner.for.cond:
1555 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1556 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28
1557 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1558 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1559 // CHECK1:       omp.inner.for.body:
1560 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1561 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1562 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1563 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !28
1564 // CHECK1-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !28
1565 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
1566 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1567 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1568 // CHECK1-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !28
1569 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !28
1570 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
1571 // CHECK1-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1572 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1573 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !28
1574 // CHECK1-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1575 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !28
1576 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !28
1577 // CHECK1-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1578 // CHECK1-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1579 // CHECK1-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !28
1580 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 0
1581 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !28
1582 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 1
1583 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !28
1584 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 2
1585 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !28
1586 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 3
1587 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !28
1588 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE1_clEv"(%class.anon.2* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !28
1589 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1590 // CHECK1:       omp.body.continue:
1591 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1592 // CHECK1:       omp.inner.for.inc:
1593 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1594 // CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1595 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28
1596 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
1597 // CHECK1:       omp.inner.for.end:
1598 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1599 // CHECK1:       omp.loop.exit:
1600 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1601 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1602 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1603 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1604 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1605 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1606 // CHECK1:       .omp.final.then:
1607 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1608 // CHECK1-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1609 // CHECK1-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1610 // CHECK1-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1611 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1612 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
1613 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1614 // CHECK1:       .omp.final.done:
1615 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1616 // CHECK1:       omp.precond.end:
1617 // CHECK1-NEXT:    ret void
1618 //
1619 //
1620 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l234
1621 // CHECK1-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
1622 // CHECK1-NEXT:  entry:
1623 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
1624 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
1625 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
1626 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
1627 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
1628 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
1629 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
1630 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
1631 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1632 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1633 // CHECK1-NEXT:    ret void
1634 //
1635 //
1636 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10
1637 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1638 // CHECK1-NEXT:  entry:
1639 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1640 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1641 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1642 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1643 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1644 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1645 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1646 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1647 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1648 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1649 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1650 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1651 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1652 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1653 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1654 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
1655 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1656 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1657 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1658 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1659 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1660 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1661 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1662 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1663 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1664 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1665 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1666 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1667 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1668 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1669 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1670 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1671 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1672 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1673 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1674 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1675 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1676 // CHECK1:       omp.precond.then:
1677 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1678 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1679 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
1680 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1681 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1682 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1683 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
1684 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1685 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1686 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1687 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
1688 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1689 // CHECK1:       cond.true:
1690 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1691 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1692 // CHECK1:       cond.false:
1693 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1694 // CHECK1-NEXT:    br label [[COND_END]]
1695 // CHECK1:       cond.end:
1696 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
1697 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1698 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1699 // CHECK1-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
1700 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1701 // CHECK1:       omp.inner.for.cond:
1702 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
1703 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31
1704 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
1705 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1706 // CHECK1:       omp.inner.for.body:
1707 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !31
1708 // CHECK1-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
1709 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31
1710 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1711 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !31
1712 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1713 // CHECK1:       omp.inner.for.inc:
1714 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
1715 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !31
1716 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
1717 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31
1718 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
1719 // CHECK1:       omp.inner.for.end:
1720 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1721 // CHECK1:       omp.loop.exit:
1722 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1723 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
1724 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
1725 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1726 // CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
1727 // CHECK1-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1728 // CHECK1:       .omp.final.then:
1729 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1730 // CHECK1-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
1731 // CHECK1-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
1732 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
1733 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
1734 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
1735 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1736 // CHECK1:       .omp.final.done:
1737 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1738 // CHECK1:       omp.precond.end:
1739 // CHECK1-NEXT:    ret void
1740 //
1741 //
1742 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11
1743 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1744 // CHECK1-NEXT:  entry:
1745 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1746 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1747 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1748 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1749 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1750 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1751 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1752 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1753 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1754 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1755 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1756 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1757 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1758 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
1759 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
1760 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1761 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1762 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
1763 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_3:%.*]], align 8
1764 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1765 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1766 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1767 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1768 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1769 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1770 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1771 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1772 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1773 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
1774 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
1775 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
1776 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
1777 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
1778 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1779 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
1780 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1781 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1782 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1783 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1784 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1785 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
1786 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1787 // CHECK1:       omp.precond.then:
1788 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
1789 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1790 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
1791 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1792 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
1793 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1794 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
1795 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
1796 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
1797 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1798 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1799 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1800 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1801 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1802 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1803 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1804 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1805 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1806 // CHECK1:       cond.true:
1807 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1808 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1809 // CHECK1:       cond.false:
1810 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1811 // CHECK1-NEXT:    br label [[COND_END]]
1812 // CHECK1:       cond.end:
1813 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1814 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1815 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1816 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1817 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1818 // CHECK1:       omp.inner.for.cond:
1819 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1820 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !34
1821 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1822 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1823 // CHECK1:       omp.inner.for.body:
1824 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1825 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
1826 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1827 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !34
1828 // CHECK1-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !34
1829 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
1830 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
1831 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
1832 // CHECK1-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !34
1833 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !34
1834 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
1835 // CHECK1-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
1836 // CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
1837 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !34
1838 // CHECK1-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
1839 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !34
1840 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !34
1841 // CHECK1-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
1842 // CHECK1-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
1843 // CHECK1-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !34
1844 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 0
1845 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 8, !llvm.access.group !34
1846 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 1
1847 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP29]], align 8, !llvm.access.group !34
1848 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 2
1849 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 8, !llvm.access.group !34
1850 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 3
1851 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 8, !llvm.access.group !34
1852 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE2_clEv"(%class.anon.3* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !34
1853 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1854 // CHECK1:       omp.body.continue:
1855 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1856 // CHECK1:       omp.inner.for.inc:
1857 // CHECK1-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1858 // CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], 1
1859 // CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34
1860 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
1861 // CHECK1:       omp.inner.for.end:
1862 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1863 // CHECK1:       omp.loop.exit:
1864 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1865 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
1866 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
1867 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
1868 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
1869 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1870 // CHECK1:       .omp.final.then:
1871 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1872 // CHECK1-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP37]], 0
1873 // CHECK1-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
1874 // CHECK1-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
1875 // CHECK1-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
1876 // CHECK1-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
1877 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
1878 // CHECK1:       .omp.final.done:
1879 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
1880 // CHECK1:       omp.precond.end:
1881 // CHECK1-NEXT:    ret void
1882 //
1883 //
1884 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l266
1885 // CHECK1-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
1886 // CHECK1-NEXT:  entry:
1887 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
1888 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
1889 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
1890 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
1891 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
1892 // CHECK1-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
1893 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
1894 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
1895 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
1896 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
1897 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
1898 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
1899 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
1900 // CHECK1-NEXT:    ret void
1901 //
1902 //
1903 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..14
1904 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
1905 // CHECK1-NEXT:  entry:
1906 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1907 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1908 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
1909 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
1910 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
1911 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
1912 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
1913 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1914 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1915 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1916 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1917 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
1918 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
1919 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1920 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1921 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1922 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1923 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
1924 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
1925 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1926 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1927 // CHECK1-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
1928 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
1929 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
1930 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
1931 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
1932 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
1933 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
1934 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
1935 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
1936 // CHECK1-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
1937 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
1938 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
1939 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
1940 // CHECK1-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1941 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1942 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
1943 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1944 // CHECK1-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
1945 // CHECK1-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
1946 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
1947 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1948 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
1949 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1950 // CHECK1:       omp.precond.then:
1951 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1952 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1953 // CHECK1-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
1954 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1955 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1956 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1957 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1958 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1959 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1960 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1961 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
1962 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1963 // CHECK1:       cond.true:
1964 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
1965 // CHECK1-NEXT:    br label [[COND_END:%.*]]
1966 // CHECK1:       cond.false:
1967 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1968 // CHECK1-NEXT:    br label [[COND_END]]
1969 // CHECK1:       cond.end:
1970 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
1971 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1972 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1973 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
1974 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1975 // CHECK1:       omp.inner.for.cond:
1976 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
1977 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37
1978 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
1979 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1980 // CHECK1:       omp.inner.for.body:
1981 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !37
1982 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
1983 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37
1984 // CHECK1-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
1985 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !37
1986 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
1987 // CHECK1-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !37
1988 // CHECK1-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !37
1989 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !37
1990 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1991 // CHECK1:       omp.inner.for.inc:
1992 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
1993 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !37
1994 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
1995 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37
1996 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
1997 // CHECK1:       omp.inner.for.end:
1998 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
1999 // CHECK1:       omp.loop.exit:
2000 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2001 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
2002 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
2003 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2004 // CHECK1-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
2005 // CHECK1-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2006 // CHECK1:       .omp.final.then:
2007 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2008 // CHECK1-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
2009 // CHECK1-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
2010 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
2011 // CHECK1-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
2012 // CHECK1-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
2013 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2014 // CHECK1:       .omp.final.done:
2015 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2016 // CHECK1:       omp.precond.end:
2017 // CHECK1-NEXT:    ret void
2018 //
2019 //
2020 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..15
2021 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
2022 // CHECK1-NEXT:  entry:
2023 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2024 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2025 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2026 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2027 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2028 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2029 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2030 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2031 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
2032 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2033 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2034 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2035 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2036 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2037 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2038 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2039 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2040 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2041 // CHECK1-NEXT:    [[I6:%.*]] = alloca i32, align 4
2042 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_4:%.*]], align 8
2043 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2044 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2045 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2046 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2047 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2048 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2049 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2050 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2051 // CHECK1-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
2052 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2053 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2054 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2055 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2056 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
2057 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2058 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2059 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2060 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2061 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2062 // CHECK1-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
2063 // CHECK1-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2064 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2065 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2066 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2067 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2068 // CHECK1:       omp.precond.then:
2069 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2070 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2071 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2072 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2073 // CHECK1-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
2074 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2075 // CHECK1-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
2076 // CHECK1-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
2077 // CHECK1-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
2078 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2079 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2080 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4
2081 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2082 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
2083 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
2084 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
2085 // CHECK1:       omp.dispatch.cond:
2086 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2087 // CHECK1-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2088 // CHECK1-NEXT:    [[CONV7:%.*]] = trunc i64 [[TMP14]] to i32
2089 // CHECK1-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP13]], [[CONV7]]
2090 // CHECK1-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2091 // CHECK1:       cond.true:
2092 // CHECK1-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2093 // CHECK1-NEXT:    [[CONV9:%.*]] = trunc i64 [[TMP15]] to i32
2094 // CHECK1-NEXT:    br label [[COND_END:%.*]]
2095 // CHECK1:       cond.false:
2096 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2097 // CHECK1-NEXT:    br label [[COND_END]]
2098 // CHECK1:       cond.end:
2099 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV9]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
2100 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2101 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2102 // CHECK1-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
2103 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2104 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2105 // CHECK1-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
2106 // CHECK1-NEXT:    br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2107 // CHECK1:       omp.dispatch.body:
2108 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2109 // CHECK1:       omp.inner.for.cond:
2110 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2111 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !40
2112 // CHECK1-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
2113 // CHECK1-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2114 // CHECK1:       omp.inner.for.body:
2115 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2116 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
2117 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2118 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !40
2119 // CHECK1-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !40
2120 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
2121 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
2122 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM]]
2123 // CHECK1-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !40
2124 // CHECK1-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !40
2125 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
2126 // CHECK1-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP27]] to i64
2127 // CHECK1-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM12]]
2128 // CHECK1-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX13]], align 8, !llvm.access.group !40
2129 // CHECK1-NEXT:    [[ADD14:%.*]] = fadd double [[TMP25]], [[TMP28]]
2130 // CHECK1-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !40
2131 // CHECK1-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !40
2132 // CHECK1-NEXT:    [[IDXPROM15:%.*]] = sext i32 [[TMP30]] to i64
2133 // CHECK1-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM15]]
2134 // CHECK1-NEXT:    store double [[ADD14]], double* [[ARRAYIDX16]], align 8, !llvm.access.group !40
2135 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 0
2136 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP31]], align 8, !llvm.access.group !40
2137 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 1
2138 // CHECK1-NEXT:    store i32* [[I6]], i32** [[TMP32]], align 8, !llvm.access.group !40
2139 // CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 2
2140 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP33]], align 8, !llvm.access.group !40
2141 // CHECK1-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 3
2142 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP34]], align 8, !llvm.access.group !40
2143 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE3_clEv"(%class.anon.4* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !40
2144 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2145 // CHECK1:       omp.body.continue:
2146 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2147 // CHECK1:       omp.inner.for.inc:
2148 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2149 // CHECK1-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP35]], 1
2150 // CHECK1-NEXT:    store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40
2151 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
2152 // CHECK1:       omp.inner.for.end:
2153 // CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
2154 // CHECK1:       omp.dispatch.inc:
2155 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2156 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2157 // CHECK1-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
2158 // CHECK1-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_LB]], align 4
2159 // CHECK1-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2160 // CHECK1-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2161 // CHECK1-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
2162 // CHECK1-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_UB]], align 4
2163 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
2164 // CHECK1:       omp.dispatch.end:
2165 // CHECK1-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2166 // CHECK1-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
2167 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
2168 // CHECK1-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2169 // CHECK1-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
2170 // CHECK1-NEXT:    br i1 [[TMP43]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2171 // CHECK1:       .omp.final.then:
2172 // CHECK1-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2173 // CHECK1-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[TMP44]], 0
2174 // CHECK1-NEXT:    [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
2175 // CHECK1-NEXT:    [[MUL22:%.*]] = mul nsw i32 [[DIV21]], 1
2176 // CHECK1-NEXT:    [[ADD23:%.*]] = add nsw i32 0, [[MUL22]]
2177 // CHECK1-NEXT:    store i32 [[ADD23]], i32* [[I6]], align 4
2178 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2179 // CHECK1:       .omp.final.done:
2180 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2181 // CHECK1:       omp.precond.end:
2182 // CHECK1-NEXT:    ret void
2183 //
2184 //
2185 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l299
2186 // CHECK1-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
2187 // CHECK1-NEXT:  entry:
2188 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
2189 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
2190 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
2191 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
2192 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
2193 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
2194 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
2195 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
2196 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
2197 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
2198 // CHECK1-NEXT:    ret void
2199 //
2200 //
2201 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..18
2202 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2203 // CHECK1-NEXT:  entry:
2204 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2205 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2206 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2207 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2208 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2209 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2210 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2211 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2212 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2213 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2214 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2215 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2216 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2217 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2218 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2219 // CHECK1-NEXT:    [[I3:%.*]] = alloca i32, align 4
2220 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2221 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2222 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2223 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2224 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2225 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2226 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2227 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2228 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2229 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2230 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2231 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2232 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2233 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2234 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2235 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2236 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2237 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2238 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2239 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2240 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2241 // CHECK1:       omp.precond.then:
2242 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2243 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2244 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
2245 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2246 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2247 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2248 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
2249 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2250 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2251 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2252 // CHECK1-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
2253 // CHECK1-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2254 // CHECK1:       cond.true:
2255 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2256 // CHECK1-NEXT:    br label [[COND_END:%.*]]
2257 // CHECK1:       cond.false:
2258 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2259 // CHECK1-NEXT:    br label [[COND_END]]
2260 // CHECK1:       cond.end:
2261 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
2262 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2263 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2264 // CHECK1-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
2265 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2266 // CHECK1:       omp.inner.for.cond:
2267 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
2268 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !43
2269 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
2270 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2271 // CHECK1:       omp.inner.for.body:
2272 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !43
2273 // CHECK1-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
2274 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !43
2275 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
2276 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !43
2277 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2278 // CHECK1:       omp.inner.for.inc:
2279 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
2280 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !43
2281 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
2282 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43
2283 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
2284 // CHECK1:       omp.inner.for.end:
2285 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2286 // CHECK1:       omp.loop.exit:
2287 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2288 // CHECK1-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
2289 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
2290 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2291 // CHECK1-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
2292 // CHECK1-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2293 // CHECK1:       .omp.final.then:
2294 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2295 // CHECK1-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
2296 // CHECK1-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
2297 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
2298 // CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
2299 // CHECK1-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
2300 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2301 // CHECK1:       .omp.final.done:
2302 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2303 // CHECK1:       omp.precond.end:
2304 // CHECK1-NEXT:    ret void
2305 //
2306 //
2307 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..19
2308 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2309 // CHECK1-NEXT:  entry:
2310 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2311 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2312 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2313 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2314 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2315 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2316 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2317 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2318 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2319 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2320 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2321 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2322 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2323 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2324 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2325 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2326 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2327 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
2328 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_5:%.*]], align 8
2329 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2330 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2331 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2332 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2333 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2334 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2335 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2336 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2337 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2338 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2339 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2340 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2341 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2342 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2343 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2344 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2345 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2346 // CHECK1-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2347 // CHECK1-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2348 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2349 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2350 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2351 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2352 // CHECK1:       omp.precond.then:
2353 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2354 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2355 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2356 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2357 // CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
2358 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2359 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
2360 // CHECK1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
2361 // CHECK1-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
2362 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2363 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2364 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2365 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2366 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2367 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
2368 // CHECK1-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
2369 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
2370 // CHECK1:       omp.dispatch.cond:
2371 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2372 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
2373 // CHECK1-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
2374 // CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
2375 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2376 // CHECK1:       omp.dispatch.body:
2377 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2378 // CHECK1-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
2379 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2380 // CHECK1:       omp.inner.for.cond:
2381 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2382 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !46
2383 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
2384 // CHECK1-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2385 // CHECK1:       omp.inner.for.body:
2386 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2387 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
2388 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2389 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !46
2390 // CHECK1-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !46
2391 // CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
2392 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
2393 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i64 [[IDXPROM]]
2394 // CHECK1-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !46
2395 // CHECK1-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !46
2396 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
2397 // CHECK1-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
2398 // CHECK1-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP24]], i64 [[IDXPROM6]]
2399 // CHECK1-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX7]], align 8, !llvm.access.group !46
2400 // CHECK1-NEXT:    [[ADD8:%.*]] = fadd double [[TMP23]], [[TMP26]]
2401 // CHECK1-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !46
2402 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !46
2403 // CHECK1-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
2404 // CHECK1-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP27]], i64 [[IDXPROM9]]
2405 // CHECK1-NEXT:    store double [[ADD8]], double* [[ARRAYIDX10]], align 8, !llvm.access.group !46
2406 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 0
2407 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP29]], align 8, !llvm.access.group !46
2408 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 1
2409 // CHECK1-NEXT:    store i32* [[I4]], i32** [[TMP30]], align 8, !llvm.access.group !46
2410 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 2
2411 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP31]], align 8, !llvm.access.group !46
2412 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 3
2413 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP32]], align 8, !llvm.access.group !46
2414 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE4_clEv"(%class.anon.5* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !46
2415 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2416 // CHECK1:       omp.body.continue:
2417 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2418 // CHECK1:       omp.inner.for.inc:
2419 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2420 // CHECK1-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP33]], 1
2421 // CHECK1-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
2422 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
2423 // CHECK1:       omp.inner.for.end:
2424 // CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
2425 // CHECK1:       omp.dispatch.inc:
2426 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
2427 // CHECK1:       omp.dispatch.end:
2428 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2429 // CHECK1-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
2430 // CHECK1-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2431 // CHECK1:       .omp.final.then:
2432 // CHECK1-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2433 // CHECK1-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP36]], 0
2434 // CHECK1-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
2435 // CHECK1-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
2436 // CHECK1-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
2437 // CHECK1-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
2438 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2439 // CHECK1:       .omp.final.done:
2440 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2441 // CHECK1:       omp.precond.end:
2442 // CHECK1-NEXT:    ret void
2443 //
2444 //
2445 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l328
2446 // CHECK1-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
2447 // CHECK1-NEXT:  entry:
2448 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
2449 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
2450 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
2451 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
2452 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
2453 // CHECK1-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
2454 // CHECK1-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
2455 // CHECK1-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
2456 // CHECK1-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
2457 // CHECK1-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
2458 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
2459 // CHECK1-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
2460 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
2461 // CHECK1-NEXT:    ret void
2462 //
2463 //
2464 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..22
2465 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR2]] {
2466 // CHECK1-NEXT:  entry:
2467 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2468 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2469 // CHECK1-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
2470 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2471 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2472 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2473 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2474 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2475 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2476 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2477 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2478 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2479 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2480 // CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2481 // CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2482 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2483 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2484 // CHECK1-NEXT:    [[I4:%.*]] = alloca i32, align 4
2485 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
2486 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2487 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2488 // CHECK1-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
2489 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2490 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2491 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2492 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2493 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
2494 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2495 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
2496 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
2497 // CHECK1-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
2498 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
2499 // CHECK1-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
2500 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
2501 // CHECK1-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2502 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2503 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
2504 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2505 // CHECK1-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
2506 // CHECK1-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2507 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2508 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2509 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
2510 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2511 // CHECK1:       omp.precond.then:
2512 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2513 // CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2514 // CHECK1-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
2515 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2516 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2517 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2518 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
2519 // CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2520 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2521 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2522 // CHECK1-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
2523 // CHECK1-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2524 // CHECK1:       cond.true:
2525 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2526 // CHECK1-NEXT:    br label [[COND_END:%.*]]
2527 // CHECK1:       cond.false:
2528 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2529 // CHECK1-NEXT:    br label [[COND_END]]
2530 // CHECK1:       cond.end:
2531 // CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
2532 // CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2533 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2534 // CHECK1-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
2535 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2536 // CHECK1:       omp.inner.for.cond:
2537 // CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
2538 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !49
2539 // CHECK1-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
2540 // CHECK1-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2541 // CHECK1:       omp.inner.for.body:
2542 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !49
2543 // CHECK1-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
2544 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !49
2545 // CHECK1-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
2546 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !49
2547 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
2548 // CHECK1-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !49
2549 // CHECK1-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !49
2550 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !49
2551 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2552 // CHECK1:       omp.inner.for.inc:
2553 // CHECK1-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
2554 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !49
2555 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
2556 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !49
2557 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
2558 // CHECK1:       omp.inner.for.end:
2559 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2560 // CHECK1:       omp.loop.exit:
2561 // CHECK1-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2562 // CHECK1-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
2563 // CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
2564 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2565 // CHECK1-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
2566 // CHECK1-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2567 // CHECK1:       .omp.final.then:
2568 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2569 // CHECK1-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
2570 // CHECK1-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
2571 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
2572 // CHECK1-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
2573 // CHECK1-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
2574 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2575 // CHECK1:       .omp.final.done:
2576 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2577 // CHECK1:       omp.precond.end:
2578 // CHECK1-NEXT:    ret void
2579 //
2580 //
2581 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..23
2582 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
2583 // CHECK1-NEXT:  entry:
2584 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2585 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2586 // CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2587 // CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2588 // CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
2589 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
2590 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
2591 // CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
2592 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
2593 // CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2594 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2595 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2596 // CHECK1-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
2597 // CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
2598 // CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2599 // CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2600 // CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2601 // CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2602 // CHECK1-NEXT:    [[I6:%.*]] = alloca i32, align 4
2603 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_6:%.*]], align 8
2604 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2605 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2606 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2607 // CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2608 // CHECK1-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
2609 // CHECK1-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
2610 // CHECK1-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
2611 // CHECK1-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
2612 // CHECK1-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
2613 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
2614 // CHECK1-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
2615 // CHECK1-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
2616 // CHECK1-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
2617 // CHECK1-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
2618 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2619 // CHECK1-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2620 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2621 // CHECK1-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2622 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2623 // CHECK1-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
2624 // CHECK1-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
2625 // CHECK1-NEXT:    store i32 0, i32* [[I]], align 4
2626 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2627 // CHECK1-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2628 // CHECK1-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2629 // CHECK1:       omp.precond.then:
2630 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2631 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
2632 // CHECK1-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2633 // CHECK1-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2634 // CHECK1-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
2635 // CHECK1-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2636 // CHECK1-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
2637 // CHECK1-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
2638 // CHECK1-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
2639 // CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2640 // CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2641 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4
2642 // CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2643 // CHECK1-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2644 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2645 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
2646 // CHECK1-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
2647 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
2648 // CHECK1:       omp.dispatch.cond:
2649 // CHECK1-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2650 // CHECK1-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
2651 // CHECK1-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
2652 // CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
2653 // CHECK1-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2654 // CHECK1:       omp.dispatch.body:
2655 // CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2656 // CHECK1-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
2657 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2658 // CHECK1:       omp.inner.for.cond:
2659 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2660 // CHECK1-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !52
2661 // CHECK1-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
2662 // CHECK1-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2663 // CHECK1:       omp.inner.for.body:
2664 // CHECK1-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2665 // CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
2666 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2667 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !52
2668 // CHECK1-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !52
2669 // CHECK1-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
2670 // CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
2671 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i64 [[IDXPROM]]
2672 // CHECK1-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !52
2673 // CHECK1-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !52
2674 // CHECK1-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
2675 // CHECK1-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
2676 // CHECK1-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP25]], i64 [[IDXPROM8]]
2677 // CHECK1-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX9]], align 8, !llvm.access.group !52
2678 // CHECK1-NEXT:    [[ADD10:%.*]] = fadd double [[TMP24]], [[TMP27]]
2679 // CHECK1-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !52
2680 // CHECK1-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !52
2681 // CHECK1-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
2682 // CHECK1-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[TMP28]], i64 [[IDXPROM11]]
2683 // CHECK1-NEXT:    store double [[ADD10]], double* [[ARRAYIDX12]], align 8, !llvm.access.group !52
2684 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 0
2685 // CHECK1-NEXT:    store double** [[TMP1]], double*** [[TMP30]], align 8, !llvm.access.group !52
2686 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 1
2687 // CHECK1-NEXT:    store i32* [[I6]], i32** [[TMP31]], align 8, !llvm.access.group !52
2688 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 2
2689 // CHECK1-NEXT:    store double** [[TMP2]], double*** [[TMP32]], align 8, !llvm.access.group !52
2690 // CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 3
2691 // CHECK1-NEXT:    store double** [[TMP3]], double*** [[TMP33]], align 8, !llvm.access.group !52
2692 // CHECK1-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE5_clEv"(%class.anon.6* noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !52
2693 // CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2694 // CHECK1:       omp.body.continue:
2695 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2696 // CHECK1:       omp.inner.for.inc:
2697 // CHECK1-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2698 // CHECK1-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], 1
2699 // CHECK1-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
2700 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
2701 // CHECK1:       omp.inner.for.end:
2702 // CHECK1-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
2703 // CHECK1:       omp.dispatch.inc:
2704 // CHECK1-NEXT:    br label [[OMP_DISPATCH_COND]]
2705 // CHECK1:       omp.dispatch.end:
2706 // CHECK1-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2707 // CHECK1-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
2708 // CHECK1-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2709 // CHECK1:       .omp.final.then:
2710 // CHECK1-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2711 // CHECK1-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP37]], 0
2712 // CHECK1-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
2713 // CHECK1-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
2714 // CHECK1-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
2715 // CHECK1-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
2716 // CHECK1-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2717 // CHECK1:       .omp.final.done:
2718 // CHECK1-NEXT:    br label [[OMP_PRECOND_END]]
2719 // CHECK1:       omp.precond.end:
2720 // CHECK1-NEXT:    ret void
2721 //
2722 //
2723 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2724 // CHECK1-SAME: () #[[ATTR4:[0-9]+]] {
2725 // CHECK1-NEXT:  entry:
2726 // CHECK1-NEXT:    call void @__tgt_register_requires(i64 1)
2727 // CHECK1-NEXT:    ret void
2728 //
2729 //
2730 // CHECK3-LABEL: define {{[^@]+}}@main
2731 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
2732 // CHECK3-NEXT:  entry:
2733 // CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2734 // CHECK3-NEXT:    [[A:%.*]] = alloca double*, align 4
2735 // CHECK3-NEXT:    [[B:%.*]] = alloca double*, align 4
2736 // CHECK3-NEXT:    [[C:%.*]] = alloca double*, align 4
2737 // CHECK3-NEXT:    [[N:%.*]] = alloca i32, align 4
2738 // CHECK3-NEXT:    [[CH:%.*]] = alloca i32, align 4
2739 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
2740 // CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2741 // CHECK3-NEXT:    store i32 10000, i32* [[N]], align 4
2742 // CHECK3-NEXT:    store i32 100, i32* [[CH]], align 4
2743 // CHECK3-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
2744 // CHECK3-NEXT:    store i32* [[N]], i32** [[TMP0]], align 4
2745 // CHECK3-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
2746 // CHECK3-NEXT:    store double** [[A]], double*** [[TMP1]], align 4
2747 // CHECK3-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
2748 // CHECK3-NEXT:    store double** [[B]], double*** [[TMP2]], align 4
2749 // CHECK3-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
2750 // CHECK3-NEXT:    store double** [[C]], double*** [[TMP3]], align 4
2751 // CHECK3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
2752 // CHECK3-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 4
2753 // CHECK3-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 4 dereferenceable(20) [[REF_TMP]])
2754 // CHECK3-NEXT:    ret i32 0
2755 //
2756 //
2757 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116
2758 // CHECK3-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2:[0-9]+]] {
2759 // CHECK3-NEXT:  entry:
2760 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
2761 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
2762 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
2763 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
2764 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
2765 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
2766 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
2767 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
2768 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
2769 // CHECK3-NEXT:    ret void
2770 //
2771 //
2772 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
2773 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
2774 // CHECK3-NEXT:  entry:
2775 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2776 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2777 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
2778 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
2779 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
2780 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
2781 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2782 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2783 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2784 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2785 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
2786 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2787 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2788 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2789 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2790 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
2791 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2792 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2793 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
2794 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
2795 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
2796 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
2797 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
2798 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
2799 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
2800 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
2801 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2802 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2803 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2804 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2805 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2806 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2807 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2808 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
2809 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2810 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2811 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2812 // CHECK3:       omp.precond.then:
2813 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2814 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2815 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
2816 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2817 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2818 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2819 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
2820 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2821 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2822 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2823 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
2824 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2825 // CHECK3:       cond.true:
2826 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2827 // CHECK3-NEXT:    br label [[COND_END:%.*]]
2828 // CHECK3:       cond.false:
2829 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2830 // CHECK3-NEXT:    br label [[COND_END]]
2831 // CHECK3:       cond.end:
2832 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
2833 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2834 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2835 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
2836 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2837 // CHECK3:       omp.inner.for.cond:
2838 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
2839 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11
2840 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
2841 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2842 // CHECK3:       omp.inner.for.body:
2843 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !11
2844 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11
2845 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !11
2846 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2847 // CHECK3:       omp.inner.for.inc:
2848 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
2849 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !11
2850 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
2851 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
2852 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
2853 // CHECK3:       omp.inner.for.end:
2854 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2855 // CHECK3:       omp.loop.exit:
2856 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2857 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
2858 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
2859 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2860 // CHECK3-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
2861 // CHECK3-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2862 // CHECK3:       .omp.final.then:
2863 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2864 // CHECK3-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
2865 // CHECK3-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
2866 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
2867 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
2868 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
2869 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
2870 // CHECK3:       .omp.final.done:
2871 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
2872 // CHECK3:       omp.precond.end:
2873 // CHECK3-NEXT:    ret void
2874 //
2875 //
2876 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
2877 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
2878 // CHECK3-NEXT:  entry:
2879 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2880 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2881 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2882 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2883 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
2884 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
2885 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
2886 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
2887 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2888 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2889 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2890 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2891 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
2892 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
2893 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
2894 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2895 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2896 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
2897 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 4
2898 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2899 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2900 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2901 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2902 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
2903 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
2904 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
2905 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
2906 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
2907 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
2908 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
2909 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
2910 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
2911 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
2912 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2913 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
2914 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2915 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2916 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2917 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
2918 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2919 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
2920 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2921 // CHECK3:       omp.precond.then:
2922 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
2923 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2924 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
2925 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2926 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2927 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
2928 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
2929 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2930 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2931 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2932 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
2933 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2934 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2935 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2936 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
2937 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2938 // CHECK3:       cond.true:
2939 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2940 // CHECK3-NEXT:    br label [[COND_END:%.*]]
2941 // CHECK3:       cond.false:
2942 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2943 // CHECK3-NEXT:    br label [[COND_END]]
2944 // CHECK3:       cond.end:
2945 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
2946 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2947 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2948 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
2949 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2950 // CHECK3:       omp.inner.for.cond:
2951 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
2952 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15
2953 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
2954 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2955 // CHECK3:       omp.inner.for.body:
2956 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
2957 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
2958 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2959 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !15
2960 // CHECK3-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !15
2961 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
2962 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
2963 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !15
2964 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !15
2965 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
2966 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
2967 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !15
2968 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
2969 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !15
2970 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15
2971 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
2972 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !15
2973 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
2974 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !15
2975 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
2976 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !15
2977 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
2978 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !15
2979 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
2980 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !15
2981 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !15
2982 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2983 // CHECK3:       omp.body.continue:
2984 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2985 // CHECK3:       omp.inner.for.inc:
2986 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
2987 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
2988 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
2989 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
2990 // CHECK3:       omp.inner.for.end:
2991 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
2992 // CHECK3:       omp.loop.exit:
2993 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2994 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
2995 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
2996 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
2997 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
2998 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2999 // CHECK3:       .omp.final.then:
3000 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3001 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
3002 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
3003 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
3004 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
3005 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
3006 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3007 // CHECK3:       .omp.final.done:
3008 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
3009 // CHECK3:       omp.precond.end:
3010 // CHECK3-NEXT:    ret void
3011 //
3012 //
3013 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
3014 // CHECK3-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
3015 // CHECK3-NEXT:  entry:
3016 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3017 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
3018 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
3019 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
3020 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3021 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
3022 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
3023 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
3024 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3025 // CHECK3-NEXT:    ret void
3026 //
3027 //
3028 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2
3029 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
3030 // CHECK3-NEXT:  entry:
3031 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3032 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3033 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
3034 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
3035 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
3036 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
3037 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3038 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3039 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3040 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3041 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
3042 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3043 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3044 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3045 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3046 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
3047 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3048 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3049 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
3050 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
3051 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
3052 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
3053 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3054 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
3055 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
3056 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
3057 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3058 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3059 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3060 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3061 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3062 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3063 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3064 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
3065 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3066 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3067 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3068 // CHECK3:       omp.precond.then:
3069 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3070 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3071 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
3072 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3073 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3074 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3075 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
3076 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3077 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3078 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3079 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
3080 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3081 // CHECK3:       cond.true:
3082 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3083 // CHECK3-NEXT:    br label [[COND_END:%.*]]
3084 // CHECK3:       cond.false:
3085 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3086 // CHECK3-NEXT:    br label [[COND_END]]
3087 // CHECK3:       cond.end:
3088 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
3089 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3090 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3091 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
3092 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3093 // CHECK3:       omp.inner.for.cond:
3094 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
3095 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20
3096 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
3097 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3098 // CHECK3:       omp.inner.for.body:
3099 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !20
3100 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20
3101 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !20
3102 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3103 // CHECK3:       omp.inner.for.inc:
3104 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
3105 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !20
3106 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
3107 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20
3108 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
3109 // CHECK3:       omp.inner.for.end:
3110 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3111 // CHECK3:       omp.loop.exit:
3112 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3113 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
3114 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
3115 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3116 // CHECK3-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
3117 // CHECK3-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3118 // CHECK3:       .omp.final.then:
3119 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3120 // CHECK3-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
3121 // CHECK3-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
3122 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
3123 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
3124 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
3125 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3126 // CHECK3:       .omp.final.done:
3127 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
3128 // CHECK3:       omp.precond.end:
3129 // CHECK3-NEXT:    ret void
3130 //
3131 //
3132 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
3133 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
3134 // CHECK3-NEXT:  entry:
3135 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3136 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3137 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3138 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3139 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
3140 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
3141 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
3142 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
3143 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3144 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3145 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3146 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3147 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
3148 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3149 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3150 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3151 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3152 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
3153 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 4
3154 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3155 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3156 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3157 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3158 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
3159 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
3160 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
3161 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
3162 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3163 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
3164 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
3165 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
3166 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3167 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3168 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3169 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3170 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3171 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3172 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3173 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
3174 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3175 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3176 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3177 // CHECK3:       omp.precond.then:
3178 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3179 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3180 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
3181 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3182 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3183 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
3184 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
3185 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3186 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3187 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3188 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3189 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3190 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3191 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3192 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3193 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3194 // CHECK3:       cond.true:
3195 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3196 // CHECK3-NEXT:    br label [[COND_END:%.*]]
3197 // CHECK3:       cond.false:
3198 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3199 // CHECK3-NEXT:    br label [[COND_END]]
3200 // CHECK3:       cond.end:
3201 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3202 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3203 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3204 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3205 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3206 // CHECK3:       omp.inner.for.cond:
3207 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
3208 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23
3209 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3210 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3211 // CHECK3:       omp.inner.for.body:
3212 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
3213 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
3214 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3215 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !23
3216 // CHECK3-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !23
3217 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
3218 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
3219 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !23
3220 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !23
3221 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
3222 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
3223 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !23
3224 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
3225 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !23
3226 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !23
3227 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
3228 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !23
3229 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0
3230 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !23
3231 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1
3232 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !23
3233 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 2
3234 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !23
3235 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 3
3236 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !23
3237 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE0_clEv"(%class.anon.1* noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !23
3238 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3239 // CHECK3:       omp.body.continue:
3240 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3241 // CHECK3:       omp.inner.for.inc:
3242 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
3243 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
3244 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23
3245 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
3246 // CHECK3:       omp.inner.for.end:
3247 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3248 // CHECK3:       omp.loop.exit:
3249 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3250 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
3251 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
3252 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3253 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
3254 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3255 // CHECK3:       .omp.final.then:
3256 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3257 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
3258 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
3259 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
3260 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
3261 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
3262 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3263 // CHECK3:       .omp.final.done:
3264 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
3265 // CHECK3:       omp.precond.end:
3266 // CHECK3-NEXT:    ret void
3267 //
3268 //
3269 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l201
3270 // CHECK3-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
3271 // CHECK3-NEXT:  entry:
3272 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
3273 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3274 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
3275 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
3276 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
3277 // CHECK3-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
3278 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3279 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
3280 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
3281 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
3282 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3283 // CHECK3-NEXT:    ret void
3284 //
3285 //
3286 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..6
3287 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
3288 // CHECK3-NEXT:  entry:
3289 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3290 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3291 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
3292 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
3293 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
3294 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
3295 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
3296 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3297 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3298 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3299 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3300 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
3301 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3302 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3303 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3304 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3305 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
3306 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3307 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3308 // CHECK3-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
3309 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
3310 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
3311 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
3312 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
3313 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
3314 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3315 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
3316 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
3317 // CHECK3-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
3318 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
3319 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
3320 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3321 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
3322 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3323 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3324 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3325 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
3326 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3327 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
3328 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3329 // CHECK3:       omp.precond.then:
3330 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3331 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3332 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
3333 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3334 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3335 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
3336 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3337 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3338 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
3339 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3340 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3341 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3342 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3343 // CHECK3:       cond.true:
3344 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3345 // CHECK3-NEXT:    br label [[COND_END:%.*]]
3346 // CHECK3:       cond.false:
3347 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3348 // CHECK3-NEXT:    br label [[COND_END]]
3349 // CHECK3:       cond.end:
3350 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3351 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3352 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3353 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3354 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3355 // CHECK3:       omp.inner.for.cond:
3356 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
3357 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
3358 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
3359 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
3360 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3361 // CHECK3:       omp.inner.for.body:
3362 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
3363 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
3364 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !26
3365 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3366 // CHECK3:       omp.inner.for.inc:
3367 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
3368 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
3369 // CHECK3-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
3370 // CHECK3-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
3371 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
3372 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
3373 // CHECK3-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
3374 // CHECK3-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
3375 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
3376 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
3377 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
3378 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
3379 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
3380 // CHECK3-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
3381 // CHECK3-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
3382 // CHECK3-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
3383 // CHECK3:       cond.true10:
3384 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !26
3385 // CHECK3-NEXT:    br label [[COND_END12:%.*]]
3386 // CHECK3:       cond.false11:
3387 // CHECK3-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
3388 // CHECK3-NEXT:    br label [[COND_END12]]
3389 // CHECK3:       cond.end12:
3390 // CHECK3-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
3391 // CHECK3-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
3392 // CHECK3-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
3393 // CHECK3-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
3394 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
3395 // CHECK3:       omp.inner.for.end:
3396 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3397 // CHECK3:       omp.loop.exit:
3398 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3399 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
3400 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
3401 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3402 // CHECK3-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
3403 // CHECK3-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3404 // CHECK3:       .omp.final.then:
3405 // CHECK3-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3406 // CHECK3-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
3407 // CHECK3-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
3408 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
3409 // CHECK3-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
3410 // CHECK3-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
3411 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3412 // CHECK3:       .omp.final.done:
3413 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
3414 // CHECK3:       omp.precond.end:
3415 // CHECK3-NEXT:    ret void
3416 //
3417 //
3418 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7
3419 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
3420 // CHECK3-NEXT:  entry:
3421 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3422 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3423 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3424 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3425 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
3426 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
3427 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
3428 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
3429 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3430 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3431 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3432 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3433 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
3434 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3435 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3436 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3437 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3438 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
3439 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_2:%.*]], align 4
3440 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3441 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3442 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3443 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3444 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
3445 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
3446 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
3447 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
3448 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3449 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
3450 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
3451 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
3452 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3453 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3454 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3455 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3456 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3457 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3458 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3459 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
3460 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3461 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3462 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3463 // CHECK3:       omp.precond.then:
3464 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3465 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3466 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
3467 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3468 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3469 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
3470 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
3471 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3472 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3473 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3474 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3475 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3476 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3477 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3478 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3479 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3480 // CHECK3:       cond.true:
3481 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3482 // CHECK3-NEXT:    br label [[COND_END:%.*]]
3483 // CHECK3:       cond.false:
3484 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3485 // CHECK3-NEXT:    br label [[COND_END]]
3486 // CHECK3:       cond.end:
3487 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3488 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3489 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3490 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3491 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3492 // CHECK3:       omp.inner.for.cond:
3493 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
3494 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29
3495 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3496 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3497 // CHECK3:       omp.inner.for.body:
3498 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
3499 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
3500 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3501 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !29
3502 // CHECK3-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !29
3503 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
3504 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
3505 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !29
3506 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !29
3507 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
3508 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
3509 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !29
3510 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
3511 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !29
3512 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !29
3513 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
3514 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !29
3515 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 0
3516 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !29
3517 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 1
3518 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !29
3519 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 2
3520 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !29
3521 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_2]], %class.anon.2* [[REF_TMP]], i32 0, i32 3
3522 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !29
3523 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE1_clEv"(%class.anon.2* noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !29
3524 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3525 // CHECK3:       omp.body.continue:
3526 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3527 // CHECK3:       omp.inner.for.inc:
3528 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
3529 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
3530 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
3531 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
3532 // CHECK3:       omp.inner.for.end:
3533 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3534 // CHECK3:       omp.loop.exit:
3535 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3536 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
3537 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
3538 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3539 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
3540 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3541 // CHECK3:       .omp.final.then:
3542 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3543 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
3544 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
3545 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
3546 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
3547 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
3548 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3549 // CHECK3:       .omp.final.done:
3550 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
3551 // CHECK3:       omp.precond.end:
3552 // CHECK3-NEXT:    ret void
3553 //
3554 //
3555 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l234
3556 // CHECK3-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
3557 // CHECK3-NEXT:  entry:
3558 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3559 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
3560 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
3561 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
3562 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3563 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
3564 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
3565 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
3566 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3567 // CHECK3-NEXT:    ret void
3568 //
3569 //
3570 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10
3571 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
3572 // CHECK3-NEXT:  entry:
3573 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3574 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3575 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
3576 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
3577 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
3578 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
3579 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3580 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3581 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3582 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3583 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
3584 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3585 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3586 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3587 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3588 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
3589 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3590 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3591 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
3592 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
3593 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
3594 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
3595 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3596 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
3597 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
3598 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
3599 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3600 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3601 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3602 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3603 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3604 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3605 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3606 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
3607 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3608 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3609 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3610 // CHECK3:       omp.precond.then:
3611 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3612 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3613 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
3614 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3615 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3616 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3617 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
3618 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3619 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3620 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3621 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
3622 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3623 // CHECK3:       cond.true:
3624 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3625 // CHECK3-NEXT:    br label [[COND_END:%.*]]
3626 // CHECK3:       cond.false:
3627 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3628 // CHECK3-NEXT:    br label [[COND_END]]
3629 // CHECK3:       cond.end:
3630 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
3631 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3632 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3633 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
3634 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3635 // CHECK3:       omp.inner.for.cond:
3636 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
3637 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
3638 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
3639 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3640 // CHECK3:       omp.inner.for.body:
3641 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
3642 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
3643 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !32
3644 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3645 // CHECK3:       omp.inner.for.inc:
3646 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
3647 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
3648 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
3649 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
3650 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
3651 // CHECK3:       omp.inner.for.end:
3652 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3653 // CHECK3:       omp.loop.exit:
3654 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3655 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
3656 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
3657 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3658 // CHECK3-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
3659 // CHECK3-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3660 // CHECK3:       .omp.final.then:
3661 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3662 // CHECK3-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
3663 // CHECK3-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
3664 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
3665 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
3666 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
3667 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3668 // CHECK3:       .omp.final.done:
3669 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
3670 // CHECK3:       omp.precond.end:
3671 // CHECK3-NEXT:    ret void
3672 //
3673 //
3674 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11
3675 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
3676 // CHECK3-NEXT:  entry:
3677 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3678 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3679 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3680 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3681 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
3682 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
3683 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
3684 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
3685 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3686 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3687 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3688 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3689 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
3690 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3691 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3692 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3693 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3694 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
3695 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_3:%.*]], align 4
3696 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3697 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3698 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3699 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3700 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
3701 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
3702 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
3703 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
3704 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3705 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
3706 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
3707 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
3708 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3709 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
3710 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3711 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3712 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3713 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
3714 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3715 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
3716 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3717 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3718 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3719 // CHECK3:       omp.precond.then:
3720 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3721 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3722 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
3723 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3724 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3725 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
3726 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
3727 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3728 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3729 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3730 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3731 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3732 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3733 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3734 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3735 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3736 // CHECK3:       cond.true:
3737 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3738 // CHECK3-NEXT:    br label [[COND_END:%.*]]
3739 // CHECK3:       cond.false:
3740 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
3741 // CHECK3-NEXT:    br label [[COND_END]]
3742 // CHECK3:       cond.end:
3743 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3744 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
3745 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
3746 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3747 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3748 // CHECK3:       omp.inner.for.cond:
3749 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
3750 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
3751 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3752 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3753 // CHECK3:       omp.inner.for.body:
3754 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
3755 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
3756 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3757 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !35
3758 // CHECK3-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !35
3759 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
3760 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
3761 // CHECK3-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !35
3762 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !35
3763 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
3764 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
3765 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !35
3766 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
3767 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !35
3768 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !35
3769 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
3770 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !35
3771 // CHECK3-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 0
3772 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP28]], align 4, !llvm.access.group !35
3773 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 1
3774 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP29]], align 4, !llvm.access.group !35
3775 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 2
3776 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP30]], align 4, !llvm.access.group !35
3777 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_3]], %class.anon.3* [[REF_TMP]], i32 0, i32 3
3778 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP31]], align 4, !llvm.access.group !35
3779 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE2_clEv"(%class.anon.3* noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !35
3780 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
3781 // CHECK3:       omp.body.continue:
3782 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3783 // CHECK3:       omp.inner.for.inc:
3784 // CHECK3-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
3785 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP32]], 1
3786 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
3787 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
3788 // CHECK3:       omp.inner.for.end:
3789 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3790 // CHECK3:       omp.loop.exit:
3791 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3792 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
3793 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
3794 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3795 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
3796 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3797 // CHECK3:       .omp.final.then:
3798 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
3799 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
3800 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
3801 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
3802 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
3803 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
3804 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3805 // CHECK3:       .omp.final.done:
3806 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
3807 // CHECK3:       omp.precond.end:
3808 // CHECK3-NEXT:    ret void
3809 //
3810 //
3811 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l266
3812 // CHECK3-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
3813 // CHECK3-NEXT:  entry:
3814 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
3815 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
3816 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
3817 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
3818 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
3819 // CHECK3-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
3820 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
3821 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
3822 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
3823 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
3824 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
3825 // CHECK3-NEXT:    ret void
3826 //
3827 //
3828 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..14
3829 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
3830 // CHECK3-NEXT:  entry:
3831 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3832 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3833 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
3834 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
3835 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
3836 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
3837 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
3838 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
3839 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3840 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3841 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3842 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
3843 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
3844 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3845 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3846 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3847 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3848 // CHECK3-NEXT:    [[I4:%.*]] = alloca i32, align 4
3849 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
3850 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3851 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3852 // CHECK3-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
3853 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
3854 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
3855 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
3856 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
3857 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
3858 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3859 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
3860 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
3861 // CHECK3-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
3862 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
3863 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
3864 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
3865 // CHECK3-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3866 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3867 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
3868 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3869 // CHECK3-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
3870 // CHECK3-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
3871 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
3872 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3873 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
3874 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3875 // CHECK3:       omp.precond.then:
3876 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
3877 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3878 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
3879 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3880 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3881 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3882 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
3883 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
3884 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3885 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3886 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
3887 // CHECK3-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3888 // CHECK3:       cond.true:
3889 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3890 // CHECK3-NEXT:    br label [[COND_END:%.*]]
3891 // CHECK3:       cond.false:
3892 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
3893 // CHECK3-NEXT:    br label [[COND_END]]
3894 // CHECK3:       cond.end:
3895 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
3896 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
3897 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
3898 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
3899 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
3900 // CHECK3:       omp.inner.for.cond:
3901 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
3902 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
3903 // CHECK3-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
3904 // CHECK3-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3905 // CHECK3:       omp.inner.for.body:
3906 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !38
3907 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
3908 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !38
3909 // CHECK3-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !38
3910 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !38
3911 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !38
3912 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
3913 // CHECK3:       omp.inner.for.inc:
3914 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
3915 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !38
3916 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
3917 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
3918 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
3919 // CHECK3:       omp.inner.for.end:
3920 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
3921 // CHECK3:       omp.loop.exit:
3922 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
3923 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
3924 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
3925 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
3926 // CHECK3-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
3927 // CHECK3-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3928 // CHECK3:       .omp.final.then:
3929 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3930 // CHECK3-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
3931 // CHECK3-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
3932 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
3933 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
3934 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
3935 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
3936 // CHECK3:       .omp.final.done:
3937 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
3938 // CHECK3:       omp.precond.end:
3939 // CHECK3-NEXT:    ret void
3940 //
3941 //
3942 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..15
3943 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
3944 // CHECK3-NEXT:  entry:
3945 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
3946 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
3947 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3948 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3949 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
3950 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
3951 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
3952 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
3953 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
3954 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
3955 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
3956 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
3957 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
3958 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
3959 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
3960 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
3961 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3962 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3963 // CHECK3-NEXT:    [[I4:%.*]] = alloca i32, align 4
3964 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_4:%.*]], align 4
3965 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
3966 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
3967 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3968 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3969 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
3970 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
3971 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
3972 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
3973 // CHECK3-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
3974 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
3975 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
3976 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
3977 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
3978 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
3979 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
3980 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3981 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
3982 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
3983 // CHECK3-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
3984 // CHECK3-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
3985 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
3986 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
3987 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
3988 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
3989 // CHECK3:       omp.precond.then:
3990 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
3991 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
3992 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
3993 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
3994 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
3995 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
3996 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
3997 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
3998 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
3999 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
4000 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4001 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
4002 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
4003 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
4004 // CHECK3:       omp.dispatch.cond:
4005 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4006 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4007 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]]
4008 // CHECK3-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4009 // CHECK3:       cond.true:
4010 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4011 // CHECK3-NEXT:    br label [[COND_END:%.*]]
4012 // CHECK3:       cond.false:
4013 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4014 // CHECK3-NEXT:    br label [[COND_END]]
4015 // CHECK3:       cond.end:
4016 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
4017 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
4018 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4019 // CHECK3-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
4020 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
4021 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4022 // CHECK3-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
4023 // CHECK3-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4024 // CHECK3:       omp.dispatch.body:
4025 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4026 // CHECK3:       omp.inner.for.cond:
4027 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
4028 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41
4029 // CHECK3-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
4030 // CHECK3-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4031 // CHECK3:       omp.inner.for.body:
4032 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
4033 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
4034 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4035 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !41
4036 // CHECK3-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !41
4037 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
4038 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
4039 // CHECK3-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !41
4040 // CHECK3-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !41
4041 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
4042 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
4043 // CHECK3-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX8]], align 4, !llvm.access.group !41
4044 // CHECK3-NEXT:    [[ADD9:%.*]] = fadd double [[TMP25]], [[TMP28]]
4045 // CHECK3-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !41
4046 // CHECK3-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
4047 // CHECK3-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
4048 // CHECK3-NEXT:    store double [[ADD9]], double* [[ARRAYIDX10]], align 4, !llvm.access.group !41
4049 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 0
4050 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP31]], align 4, !llvm.access.group !41
4051 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 1
4052 // CHECK3-NEXT:    store i32* [[I4]], i32** [[TMP32]], align 4, !llvm.access.group !41
4053 // CHECK3-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 2
4054 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP33]], align 4, !llvm.access.group !41
4055 // CHECK3-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[CLASS_ANON_4]], %class.anon.4* [[REF_TMP]], i32 0, i32 3
4056 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP34]], align 4, !llvm.access.group !41
4057 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE3_clEv"(%class.anon.4* noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !41
4058 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4059 // CHECK3:       omp.body.continue:
4060 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4061 // CHECK3:       omp.inner.for.inc:
4062 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
4063 // CHECK3-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP35]], 1
4064 // CHECK3-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
4065 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
4066 // CHECK3:       omp.inner.for.end:
4067 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
4068 // CHECK3:       omp.dispatch.inc:
4069 // CHECK3-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4070 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4071 // CHECK3-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
4072 // CHECK3-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
4073 // CHECK3-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4074 // CHECK3-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
4075 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP38]], [[TMP39]]
4076 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
4077 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
4078 // CHECK3:       omp.dispatch.end:
4079 // CHECK3-NEXT:    [[TMP40:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4080 // CHECK3-NEXT:    [[TMP41:%.*]] = load i32, i32* [[TMP40]], align 4
4081 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP41]])
4082 // CHECK3-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4083 // CHECK3-NEXT:    [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
4084 // CHECK3-NEXT:    br i1 [[TMP43]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4085 // CHECK3:       .omp.final.then:
4086 // CHECK3-NEXT:    [[TMP44:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4087 // CHECK3-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP44]], 0
4088 // CHECK3-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
4089 // CHECK3-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
4090 // CHECK3-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
4091 // CHECK3-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
4092 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4093 // CHECK3:       .omp.final.done:
4094 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
4095 // CHECK3:       omp.precond.end:
4096 // CHECK3-NEXT:    ret void
4097 //
4098 //
4099 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l299
4100 // CHECK3-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
4101 // CHECK3-NEXT:  entry:
4102 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4103 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
4104 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
4105 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
4106 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4107 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
4108 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
4109 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
4110 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
4111 // CHECK3-NEXT:    ret void
4112 //
4113 //
4114 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..18
4115 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
4116 // CHECK3-NEXT:  entry:
4117 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4118 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4119 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
4120 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
4121 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
4122 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
4123 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4124 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4125 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4126 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4127 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
4128 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4129 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4130 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4131 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4132 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
4133 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4134 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4135 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
4136 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
4137 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
4138 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
4139 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
4140 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
4141 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
4142 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
4143 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4144 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
4145 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4146 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4147 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4148 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4149 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4150 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
4151 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4152 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4153 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4154 // CHECK3:       omp.precond.then:
4155 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4156 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4157 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
4158 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4159 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4160 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4161 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
4162 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4163 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4164 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4165 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
4166 // CHECK3-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4167 // CHECK3:       cond.true:
4168 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4169 // CHECK3-NEXT:    br label [[COND_END:%.*]]
4170 // CHECK3:       cond.false:
4171 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4172 // CHECK3-NEXT:    br label [[COND_END]]
4173 // CHECK3:       cond.end:
4174 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
4175 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4176 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4177 // CHECK3-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
4178 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4179 // CHECK3:       omp.inner.for.cond:
4180 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
4181 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
4182 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
4183 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4184 // CHECK3:       omp.inner.for.body:
4185 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !44
4186 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
4187 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !44
4188 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4189 // CHECK3:       omp.inner.for.inc:
4190 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
4191 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !44
4192 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
4193 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
4194 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
4195 // CHECK3:       omp.inner.for.end:
4196 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4197 // CHECK3:       omp.loop.exit:
4198 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4199 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
4200 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
4201 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4202 // CHECK3-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
4203 // CHECK3-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4204 // CHECK3:       .omp.final.then:
4205 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4206 // CHECK3-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
4207 // CHECK3-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
4208 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
4209 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
4210 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
4211 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4212 // CHECK3:       .omp.final.done:
4213 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
4214 // CHECK3:       omp.precond.end:
4215 // CHECK3-NEXT:    ret void
4216 //
4217 //
4218 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..19
4219 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
4220 // CHECK3-NEXT:  entry:
4221 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4222 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4223 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4224 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4225 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
4226 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
4227 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
4228 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
4229 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4230 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4231 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4232 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4233 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
4234 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4235 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4236 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4237 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4238 // CHECK3-NEXT:    [[I3:%.*]] = alloca i32, align 4
4239 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_5:%.*]], align 4
4240 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4241 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4242 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4243 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4244 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
4245 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
4246 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
4247 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
4248 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
4249 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
4250 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
4251 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
4252 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4253 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
4254 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4255 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4256 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4257 // CHECK3-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4258 // CHECK3-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4259 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
4260 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4261 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4262 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4263 // CHECK3:       omp.precond.then:
4264 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4265 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4266 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
4267 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4268 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4269 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
4270 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
4271 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4272 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4273 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4274 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4275 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4276 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
4277 // CHECK3-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
4278 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
4279 // CHECK3:       omp.dispatch.cond:
4280 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4281 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
4282 // CHECK3-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
4283 // CHECK3-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
4284 // CHECK3-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4285 // CHECK3:       omp.dispatch.body:
4286 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4287 // CHECK3-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
4288 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4289 // CHECK3:       omp.inner.for.cond:
4290 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
4291 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47
4292 // CHECK3-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
4293 // CHECK3-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4294 // CHECK3:       omp.inner.for.body:
4295 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
4296 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
4297 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4298 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !47
4299 // CHECK3-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !47
4300 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
4301 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i32 [[TMP22]]
4302 // CHECK3-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !47
4303 // CHECK3-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !47
4304 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
4305 // CHECK3-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP24]], i32 [[TMP25]]
4306 // CHECK3-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !47
4307 // CHECK3-NEXT:    [[ADD6:%.*]] = fadd double [[TMP23]], [[TMP26]]
4308 // CHECK3-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !47
4309 // CHECK3-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !47
4310 // CHECK3-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP27]], i32 [[TMP28]]
4311 // CHECK3-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !47
4312 // CHECK3-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 0
4313 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP29]], align 4, !llvm.access.group !47
4314 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 1
4315 // CHECK3-NEXT:    store i32* [[I3]], i32** [[TMP30]], align 4, !llvm.access.group !47
4316 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 2
4317 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP31]], align 4, !llvm.access.group !47
4318 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_5]], %class.anon.5* [[REF_TMP]], i32 0, i32 3
4319 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP32]], align 4, !llvm.access.group !47
4320 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE4_clEv"(%class.anon.5* noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !47
4321 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4322 // CHECK3:       omp.body.continue:
4323 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4324 // CHECK3:       omp.inner.for.inc:
4325 // CHECK3-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
4326 // CHECK3-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP33]], 1
4327 // CHECK3-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
4328 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
4329 // CHECK3:       omp.inner.for.end:
4330 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
4331 // CHECK3:       omp.dispatch.inc:
4332 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
4333 // CHECK3:       omp.dispatch.end:
4334 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4335 // CHECK3-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
4336 // CHECK3-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4337 // CHECK3:       .omp.final.then:
4338 // CHECK3-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4339 // CHECK3-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP36]], 0
4340 // CHECK3-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
4341 // CHECK3-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
4342 // CHECK3-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
4343 // CHECK3-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
4344 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4345 // CHECK3:       .omp.final.done:
4346 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
4347 // CHECK3:       omp.precond.end:
4348 // CHECK3-NEXT:    ret void
4349 //
4350 //
4351 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l328
4352 // CHECK3-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR2]] {
4353 // CHECK3-NEXT:  entry:
4354 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
4355 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
4356 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
4357 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
4358 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
4359 // CHECK3-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
4360 // CHECK3-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
4361 // CHECK3-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
4362 // CHECK3-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
4363 // CHECK3-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
4364 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
4365 // CHECK3-NEXT:    ret void
4366 //
4367 //
4368 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..22
4369 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
4370 // CHECK3-NEXT:  entry:
4371 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4372 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4373 // CHECK3-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
4374 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
4375 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
4376 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
4377 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
4378 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4379 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4380 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4381 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4382 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
4383 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
4384 // CHECK3-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4385 // CHECK3-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4386 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4387 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4388 // CHECK3-NEXT:    [[I4:%.*]] = alloca i32, align 4
4389 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
4390 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4391 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4392 // CHECK3-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
4393 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
4394 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
4395 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
4396 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
4397 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
4398 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
4399 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
4400 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
4401 // CHECK3-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
4402 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
4403 // CHECK3-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
4404 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
4405 // CHECK3-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4406 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4407 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
4408 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4409 // CHECK3-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
4410 // CHECK3-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
4411 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
4412 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4413 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
4414 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4415 // CHECK3:       omp.precond.then:
4416 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
4417 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4418 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
4419 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4420 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4421 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4422 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
4423 // CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
4424 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4425 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4426 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
4427 // CHECK3-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4428 // CHECK3:       cond.true:
4429 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4430 // CHECK3-NEXT:    br label [[COND_END:%.*]]
4431 // CHECK3:       cond.false:
4432 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
4433 // CHECK3-NEXT:    br label [[COND_END]]
4434 // CHECK3:       cond.end:
4435 // CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
4436 // CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
4437 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
4438 // CHECK3-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
4439 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4440 // CHECK3:       omp.inner.for.cond:
4441 // CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
4442 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
4443 // CHECK3-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
4444 // CHECK3-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4445 // CHECK3:       omp.inner.for.body:
4446 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !50
4447 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
4448 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !50
4449 // CHECK3-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !50
4450 // CHECK3-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !50
4451 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !50
4452 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4453 // CHECK3:       omp.inner.for.inc:
4454 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
4455 // CHECK3-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !50
4456 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
4457 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
4458 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
4459 // CHECK3:       omp.inner.for.end:
4460 // CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
4461 // CHECK3:       omp.loop.exit:
4462 // CHECK3-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4463 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
4464 // CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
4465 // CHECK3-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4466 // CHECK3-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
4467 // CHECK3-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4468 // CHECK3:       .omp.final.then:
4469 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4470 // CHECK3-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
4471 // CHECK3-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
4472 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
4473 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
4474 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
4475 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4476 // CHECK3:       .omp.final.done:
4477 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
4478 // CHECK3:       omp.precond.end:
4479 // CHECK3-NEXT:    ret void
4480 //
4481 //
4482 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..23
4483 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
4484 // CHECK3-NEXT:  entry:
4485 // CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
4486 // CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
4487 // CHECK3-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4488 // CHECK3-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4489 // CHECK3-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
4490 // CHECK3-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
4491 // CHECK3-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
4492 // CHECK3-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
4493 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
4494 // CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
4495 // CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4496 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4497 // CHECK3-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
4498 // CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
4499 // CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
4500 // CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
4501 // CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4502 // CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4503 // CHECK3-NEXT:    [[I4:%.*]] = alloca i32, align 4
4504 // CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_6:%.*]], align 4
4505 // CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
4506 // CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
4507 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4508 // CHECK3-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4509 // CHECK3-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
4510 // CHECK3-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
4511 // CHECK3-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
4512 // CHECK3-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
4513 // CHECK3-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
4514 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
4515 // CHECK3-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
4516 // CHECK3-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
4517 // CHECK3-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
4518 // CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
4519 // CHECK3-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4520 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4521 // CHECK3-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
4522 // CHECK3-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4523 // CHECK3-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
4524 // CHECK3-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
4525 // CHECK3-NEXT:    store i32 0, i32* [[I]], align 4
4526 // CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4527 // CHECK3-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
4528 // CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
4529 // CHECK3:       omp.precond.then:
4530 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
4531 // CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
4532 // CHECK3-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
4533 // CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
4534 // CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
4535 // CHECK3-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
4536 // CHECK3-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
4537 // CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
4538 // CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
4539 // CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
4540 // CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4541 // CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
4542 // CHECK3-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4543 // CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
4544 // CHECK3-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
4545 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
4546 // CHECK3:       omp.dispatch.cond:
4547 // CHECK3-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
4548 // CHECK3-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
4549 // CHECK3-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
4550 // CHECK3-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
4551 // CHECK3-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4552 // CHECK3:       omp.dispatch.body:
4553 // CHECK3-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
4554 // CHECK3-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
4555 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
4556 // CHECK3:       omp.inner.for.cond:
4557 // CHECK3-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
4558 // CHECK3-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !53
4559 // CHECK3-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
4560 // CHECK3-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4561 // CHECK3:       omp.inner.for.body:
4562 // CHECK3-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
4563 // CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
4564 // CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4565 // CHECK3-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !53
4566 // CHECK3-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !53
4567 // CHECK3-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
4568 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i32 [[TMP23]]
4569 // CHECK3-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !53
4570 // CHECK3-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !53
4571 // CHECK3-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
4572 // CHECK3-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP25]], i32 [[TMP26]]
4573 // CHECK3-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !53
4574 // CHECK3-NEXT:    [[ADD7:%.*]] = fadd double [[TMP24]], [[TMP27]]
4575 // CHECK3-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !53
4576 // CHECK3-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
4577 // CHECK3-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP28]], i32 [[TMP29]]
4578 // CHECK3-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !53
4579 // CHECK3-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 0
4580 // CHECK3-NEXT:    store double** [[TMP1]], double*** [[TMP30]], align 4, !llvm.access.group !53
4581 // CHECK3-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 1
4582 // CHECK3-NEXT:    store i32* [[I4]], i32** [[TMP31]], align 4, !llvm.access.group !53
4583 // CHECK3-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 2
4584 // CHECK3-NEXT:    store double** [[TMP2]], double*** [[TMP32]], align 4, !llvm.access.group !53
4585 // CHECK3-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_6]], %class.anon.6* [[REF_TMP]], i32 0, i32 3
4586 // CHECK3-NEXT:    store double** [[TMP3]], double*** [[TMP33]], align 4, !llvm.access.group !53
4587 // CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE5_clEv"(%class.anon.6* noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !53
4588 // CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
4589 // CHECK3:       omp.body.continue:
4590 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
4591 // CHECK3:       omp.inner.for.inc:
4592 // CHECK3-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
4593 // CHECK3-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP34]], 1
4594 // CHECK3-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
4595 // CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
4596 // CHECK3:       omp.inner.for.end:
4597 // CHECK3-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
4598 // CHECK3:       omp.dispatch.inc:
4599 // CHECK3-NEXT:    br label [[OMP_DISPATCH_COND]]
4600 // CHECK3:       omp.dispatch.end:
4601 // CHECK3-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
4602 // CHECK3-NEXT:    [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0
4603 // CHECK3-NEXT:    br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4604 // CHECK3:       .omp.final.then:
4605 // CHECK3-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4606 // CHECK3-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP37]], 0
4607 // CHECK3-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
4608 // CHECK3-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
4609 // CHECK3-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
4610 // CHECK3-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
4611 // CHECK3-NEXT:    br label [[DOTOMP_FINAL_DONE]]
4612 // CHECK3:       .omp.final.done:
4613 // CHECK3-NEXT:    br label [[OMP_PRECOND_END]]
4614 // CHECK3:       omp.precond.end:
4615 // CHECK3-NEXT:    ret void
4616 //
4617 //
4618 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
4619 // CHECK3-SAME: () #[[ATTR4:[0-9]+]] {
4620 // CHECK3-NEXT:  entry:
4621 // CHECK3-NEXT:    call void @__tgt_register_requires(i64 1)
4622 // CHECK3-NEXT:    ret void
4623 //
4624 //
4625 // CHECK5-LABEL: define {{[^@]+}}@main
4626 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
4627 // CHECK5-NEXT:  entry:
4628 // CHECK5-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
4629 // CHECK5-NEXT:    [[A:%.*]] = alloca double*, align 8
4630 // CHECK5-NEXT:    [[B:%.*]] = alloca double*, align 8
4631 // CHECK5-NEXT:    [[C:%.*]] = alloca double*, align 8
4632 // CHECK5-NEXT:    [[N:%.*]] = alloca i32, align 4
4633 // CHECK5-NEXT:    [[CH:%.*]] = alloca i32, align 4
4634 // CHECK5-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
4635 // CHECK5-NEXT:    store i32 0, i32* [[RETVAL]], align 4
4636 // CHECK5-NEXT:    store i32 10000, i32* [[N]], align 4
4637 // CHECK5-NEXT:    store i32 100, i32* [[CH]], align 4
4638 // CHECK5-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
4639 // CHECK5-NEXT:    store i32* [[N]], i32** [[TMP0]], align 8
4640 // CHECK5-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
4641 // CHECK5-NEXT:    store double** [[A]], double*** [[TMP1]], align 8
4642 // CHECK5-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
4643 // CHECK5-NEXT:    store double** [[B]], double*** [[TMP2]], align 8
4644 // CHECK5-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
4645 // CHECK5-NEXT:    store double** [[C]], double*** [[TMP3]], align 8
4646 // CHECK5-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
4647 // CHECK5-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 8
4648 // CHECK5-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 8 dereferenceable(40) [[REF_TMP]])
4649 // CHECK5-NEXT:    ret i32 0
4650 //
4651 //
4652 // CHECK7-LABEL: define {{[^@]+}}@main
4653 // CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
4654 // CHECK7-NEXT:  entry:
4655 // CHECK7-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
4656 // CHECK7-NEXT:    [[A:%.*]] = alloca double*, align 4
4657 // CHECK7-NEXT:    [[B:%.*]] = alloca double*, align 4
4658 // CHECK7-NEXT:    [[C:%.*]] = alloca double*, align 4
4659 // CHECK7-NEXT:    [[N:%.*]] = alloca i32, align 4
4660 // CHECK7-NEXT:    [[CH:%.*]] = alloca i32, align 4
4661 // CHECK7-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
4662 // CHECK7-NEXT:    store i32 0, i32* [[RETVAL]], align 4
4663 // CHECK7-NEXT:    store i32 10000, i32* [[N]], align 4
4664 // CHECK7-NEXT:    store i32 100, i32* [[CH]], align 4
4665 // CHECK7-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0
4666 // CHECK7-NEXT:    store i32* [[N]], i32** [[TMP0]], align 4
4667 // CHECK7-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1
4668 // CHECK7-NEXT:    store double** [[A]], double*** [[TMP1]], align 4
4669 // CHECK7-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 2
4670 // CHECK7-NEXT:    store double** [[B]], double*** [[TMP2]], align 4
4671 // CHECK7-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 3
4672 // CHECK7-NEXT:    store double** [[C]], double*** [[TMP3]], align 4
4673 // CHECK7-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 4
4674 // CHECK7-NEXT:    store i32* [[CH]], i32** [[TMP4]], align 4
4675 // CHECK7-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 4 dereferenceable(20) [[REF_TMP]])
4676 // CHECK7-NEXT:    ret i32 0
4677 //
4678 //
4679 // CHECK9-LABEL: define {{[^@]+}}@main
4680 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
4681 // CHECK9-NEXT:  entry:
4682 // CHECK9-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
4683 // CHECK9-NEXT:    [[A:%.*]] = alloca double*, align 8
4684 // CHECK9-NEXT:    [[B:%.*]] = alloca double*, align 8
4685 // CHECK9-NEXT:    [[C:%.*]] = alloca double*, align 8
4686 // CHECK9-NEXT:    [[N:%.*]] = alloca i32, align 4
4687 // CHECK9-NEXT:    [[CH:%.*]] = alloca i32, align 4
4688 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
4689 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
4690 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
4691 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
4692 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
4693 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4694 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4695 // CHECK9-NEXT:    [[N_CASTED3:%.*]] = alloca i64, align 8
4696 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [4 x i8*], align 8
4697 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [4 x i8*], align 8
4698 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [4 x i8*], align 8
4699 // CHECK9-NEXT:    [[_TMP8:%.*]] = alloca i32, align 4
4700 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
4701 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
4702 // CHECK9-NEXT:    [[CH_CASTED:%.*]] = alloca i64, align 8
4703 // CHECK9-NEXT:    [[N_CASTED18:%.*]] = alloca i64, align 8
4704 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [5 x i8*], align 8
4705 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [5 x i8*], align 8
4706 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [5 x i8*], align 8
4707 // CHECK9-NEXT:    [[_TMP23:%.*]] = alloca i32, align 4
4708 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
4709 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
4710 // CHECK9-NEXT:    [[N_CASTED32:%.*]] = alloca i64, align 8
4711 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [4 x i8*], align 8
4712 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS35:%.*]] = alloca [4 x i8*], align 8
4713 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [4 x i8*], align 8
4714 // CHECK9-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
4715 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
4716 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
4717 // CHECK9-NEXT:    [[CH_CASTED46:%.*]] = alloca i64, align 8
4718 // CHECK9-NEXT:    [[N_CASTED48:%.*]] = alloca i64, align 8
4719 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [5 x i8*], align 8
4720 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS51:%.*]] = alloca [5 x i8*], align 8
4721 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [5 x i8*], align 8
4722 // CHECK9-NEXT:    [[_TMP53:%.*]] = alloca i32, align 4
4723 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
4724 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
4725 // CHECK9-NEXT:    [[N_CASTED62:%.*]] = alloca i64, align 8
4726 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS64:%.*]] = alloca [4 x i8*], align 8
4727 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS65:%.*]] = alloca [4 x i8*], align 8
4728 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS66:%.*]] = alloca [4 x i8*], align 8
4729 // CHECK9-NEXT:    [[_TMP67:%.*]] = alloca i32, align 4
4730 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_68:%.*]] = alloca i32, align 4
4731 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_69:%.*]] = alloca i32, align 4
4732 // CHECK9-NEXT:    [[CH_CASTED76:%.*]] = alloca i64, align 8
4733 // CHECK9-NEXT:    [[N_CASTED78:%.*]] = alloca i64, align 8
4734 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS80:%.*]] = alloca [5 x i8*], align 8
4735 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS81:%.*]] = alloca [5 x i8*], align 8
4736 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS82:%.*]] = alloca [5 x i8*], align 8
4737 // CHECK9-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
4738 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
4739 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
4740 // CHECK9-NEXT:    store i32 0, i32* [[RETVAL]], align 4
4741 // CHECK9-NEXT:    store i32 10000, i32* [[N]], align 4
4742 // CHECK9-NEXT:    store i32 100, i32* [[CH]], align 4
4743 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
4744 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
4745 // CHECK9-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
4746 // CHECK9-NEXT:    [[TMP1:%.*]] = load i64, i64* [[N_CASTED]], align 8
4747 // CHECK9-NEXT:    [[TMP2:%.*]] = load double*, double** [[A]], align 8
4748 // CHECK9-NEXT:    [[TMP3:%.*]] = load double*, double** [[B]], align 8
4749 // CHECK9-NEXT:    [[TMP4:%.*]] = load double*, double** [[C]], align 8
4750 // CHECK9-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4751 // CHECK9-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
4752 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
4753 // CHECK9-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4754 // CHECK9-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
4755 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
4756 // CHECK9-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
4757 // CHECK9-NEXT:    store i8* null, i8** [[TMP9]], align 8
4758 // CHECK9-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
4759 // CHECK9-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to double**
4760 // CHECK9-NEXT:    store double* [[TMP2]], double** [[TMP11]], align 8
4761 // CHECK9-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
4762 // CHECK9-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
4763 // CHECK9-NEXT:    store double* [[TMP2]], double** [[TMP13]], align 8
4764 // CHECK9-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
4765 // CHECK9-NEXT:    store i8* null, i8** [[TMP14]], align 8
4766 // CHECK9-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
4767 // CHECK9-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
4768 // CHECK9-NEXT:    store double* [[TMP3]], double** [[TMP16]], align 8
4769 // CHECK9-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
4770 // CHECK9-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to double**
4771 // CHECK9-NEXT:    store double* [[TMP3]], double** [[TMP18]], align 8
4772 // CHECK9-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
4773 // CHECK9-NEXT:    store i8* null, i8** [[TMP19]], align 8
4774 // CHECK9-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
4775 // CHECK9-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to double**
4776 // CHECK9-NEXT:    store double* [[TMP4]], double** [[TMP21]], align 8
4777 // CHECK9-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
4778 // CHECK9-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to double**
4779 // CHECK9-NEXT:    store double* [[TMP4]], double** [[TMP23]], align 8
4780 // CHECK9-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
4781 // CHECK9-NEXT:    store i8* null, i8** [[TMP24]], align 8
4782 // CHECK9-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4783 // CHECK9-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4784 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
4785 // CHECK9-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
4786 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
4787 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
4788 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
4789 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
4790 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
4791 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
4792 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
4793 // CHECK9-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
4794 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
4795 // CHECK9-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
4796 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
4797 // CHECK9-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
4798 // CHECK9:       omp_offload.failed:
4799 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368(i64 [[TMP1]], double* [[TMP2]], double* [[TMP3]], double* [[TMP4]]) #[[ATTR2:[0-9]+]]
4800 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT]]
4801 // CHECK9:       omp_offload.cont:
4802 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
4803 // CHECK9-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
4804 // CHECK9-NEXT:    store i32 [[TMP33]], i32* [[CONV4]], align 4
4805 // CHECK9-NEXT:    [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
4806 // CHECK9-NEXT:    [[TMP35:%.*]] = load double*, double** [[A]], align 8
4807 // CHECK9-NEXT:    [[TMP36:%.*]] = load double*, double** [[B]], align 8
4808 // CHECK9-NEXT:    [[TMP37:%.*]] = load double*, double** [[C]], align 8
4809 // CHECK9-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
4810 // CHECK9-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
4811 // CHECK9-NEXT:    store i64 [[TMP34]], i64* [[TMP39]], align 8
4812 // CHECK9-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
4813 // CHECK9-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64*
4814 // CHECK9-NEXT:    store i64 [[TMP34]], i64* [[TMP41]], align 8
4815 // CHECK9-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
4816 // CHECK9-NEXT:    store i8* null, i8** [[TMP42]], align 8
4817 // CHECK9-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
4818 // CHECK9-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to double**
4819 // CHECK9-NEXT:    store double* [[TMP35]], double** [[TMP44]], align 8
4820 // CHECK9-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
4821 // CHECK9-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double**
4822 // CHECK9-NEXT:    store double* [[TMP35]], double** [[TMP46]], align 8
4823 // CHECK9-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
4824 // CHECK9-NEXT:    store i8* null, i8** [[TMP47]], align 8
4825 // CHECK9-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
4826 // CHECK9-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to double**
4827 // CHECK9-NEXT:    store double* [[TMP36]], double** [[TMP49]], align 8
4828 // CHECK9-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
4829 // CHECK9-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to double**
4830 // CHECK9-NEXT:    store double* [[TMP36]], double** [[TMP51]], align 8
4831 // CHECK9-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
4832 // CHECK9-NEXT:    store i8* null, i8** [[TMP52]], align 8
4833 // CHECK9-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 3
4834 // CHECK9-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to double**
4835 // CHECK9-NEXT:    store double* [[TMP37]], double** [[TMP54]], align 8
4836 // CHECK9-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 3
4837 // CHECK9-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to double**
4838 // CHECK9-NEXT:    store double* [[TMP37]], double** [[TMP56]], align 8
4839 // CHECK9-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 3
4840 // CHECK9-NEXT:    store i8* null, i8** [[TMP57]], align 8
4841 // CHECK9-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
4842 // CHECK9-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
4843 // CHECK9-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
4844 // CHECK9-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_9]], align 4
4845 // CHECK9-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
4846 // CHECK9-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP61]], 0
4847 // CHECK9-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
4848 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
4849 // CHECK9-NEXT:    store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
4850 // CHECK9-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
4851 // CHECK9-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP62]], 1
4852 // CHECK9-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD14]] to i64
4853 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
4854 // CHECK9-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
4855 // CHECK9-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
4856 // CHECK9-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
4857 // CHECK9:       omp_offload.failed15:
4858 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407(i64 [[TMP34]], double* [[TMP35]], double* [[TMP36]], double* [[TMP37]]) #[[ATTR2]]
4859 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT16]]
4860 // CHECK9:       omp_offload.cont16:
4861 // CHECK9-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
4862 // CHECK9-NEXT:    [[CONV17:%.*]] = bitcast i64* [[CH_CASTED]] to i32*
4863 // CHECK9-NEXT:    store i32 [[TMP66]], i32* [[CONV17]], align 4
4864 // CHECK9-NEXT:    [[TMP67:%.*]] = load i64, i64* [[CH_CASTED]], align 8
4865 // CHECK9-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
4866 // CHECK9-NEXT:    [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32*
4867 // CHECK9-NEXT:    store i32 [[TMP68]], i32* [[CONV19]], align 4
4868 // CHECK9-NEXT:    [[TMP69:%.*]] = load i64, i64* [[N_CASTED18]], align 8
4869 // CHECK9-NEXT:    [[TMP70:%.*]] = load double*, double** [[A]], align 8
4870 // CHECK9-NEXT:    [[TMP71:%.*]] = load double*, double** [[B]], align 8
4871 // CHECK9-NEXT:    [[TMP72:%.*]] = load double*, double** [[C]], align 8
4872 // CHECK9-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
4873 // CHECK9-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i64*
4874 // CHECK9-NEXT:    store i64 [[TMP67]], i64* [[TMP74]], align 8
4875 // CHECK9-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
4876 // CHECK9-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
4877 // CHECK9-NEXT:    store i64 [[TMP67]], i64* [[TMP76]], align 8
4878 // CHECK9-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
4879 // CHECK9-NEXT:    store i8* null, i8** [[TMP77]], align 8
4880 // CHECK9-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
4881 // CHECK9-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64*
4882 // CHECK9-NEXT:    store i64 [[TMP69]], i64* [[TMP79]], align 8
4883 // CHECK9-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
4884 // CHECK9-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
4885 // CHECK9-NEXT:    store i64 [[TMP69]], i64* [[TMP81]], align 8
4886 // CHECK9-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
4887 // CHECK9-NEXT:    store i8* null, i8** [[TMP82]], align 8
4888 // CHECK9-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
4889 // CHECK9-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to double**
4890 // CHECK9-NEXT:    store double* [[TMP70]], double** [[TMP84]], align 8
4891 // CHECK9-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
4892 // CHECK9-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to double**
4893 // CHECK9-NEXT:    store double* [[TMP70]], double** [[TMP86]], align 8
4894 // CHECK9-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
4895 // CHECK9-NEXT:    store i8* null, i8** [[TMP87]], align 8
4896 // CHECK9-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
4897 // CHECK9-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to double**
4898 // CHECK9-NEXT:    store double* [[TMP71]], double** [[TMP89]], align 8
4899 // CHECK9-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
4900 // CHECK9-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to double**
4901 // CHECK9-NEXT:    store double* [[TMP71]], double** [[TMP91]], align 8
4902 // CHECK9-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
4903 // CHECK9-NEXT:    store i8* null, i8** [[TMP92]], align 8
4904 // CHECK9-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
4905 // CHECK9-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
4906 // CHECK9-NEXT:    store double* [[TMP72]], double** [[TMP94]], align 8
4907 // CHECK9-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
4908 // CHECK9-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to double**
4909 // CHECK9-NEXT:    store double* [[TMP72]], double** [[TMP96]], align 8
4910 // CHECK9-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
4911 // CHECK9-NEXT:    store i8* null, i8** [[TMP97]], align 8
4912 // CHECK9-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
4913 // CHECK9-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
4914 // CHECK9-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
4915 // CHECK9-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_24]], align 4
4916 // CHECK9-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
4917 // CHECK9-NEXT:    [[SUB26:%.*]] = sub nsw i32 [[TMP101]], 0
4918 // CHECK9-NEXT:    [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
4919 // CHECK9-NEXT:    [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
4920 // CHECK9-NEXT:    store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
4921 // CHECK9-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
4922 // CHECK9-NEXT:    [[ADD29:%.*]] = add nsw i32 [[TMP102]], 1
4923 // CHECK9-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD29]] to i64
4924 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
4925 // CHECK9-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
4926 // CHECK9-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
4927 // CHECK9-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
4928 // CHECK9:       omp_offload.failed30:
4929 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446(i64 [[TMP67]], i64 [[TMP69]], double* [[TMP70]], double* [[TMP71]], double* [[TMP72]]) #[[ATTR2]]
4930 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
4931 // CHECK9:       omp_offload.cont31:
4932 // CHECK9-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
4933 // CHECK9-NEXT:    [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32*
4934 // CHECK9-NEXT:    store i32 [[TMP106]], i32* [[CONV33]], align 4
4935 // CHECK9-NEXT:    [[TMP107:%.*]] = load i64, i64* [[N_CASTED32]], align 8
4936 // CHECK9-NEXT:    [[TMP108:%.*]] = load double*, double** [[A]], align 8
4937 // CHECK9-NEXT:    [[TMP109:%.*]] = load double*, double** [[B]], align 8
4938 // CHECK9-NEXT:    [[TMP110:%.*]] = load double*, double** [[C]], align 8
4939 // CHECK9-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
4940 // CHECK9-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
4941 // CHECK9-NEXT:    store i64 [[TMP107]], i64* [[TMP112]], align 8
4942 // CHECK9-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
4943 // CHECK9-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
4944 // CHECK9-NEXT:    store i64 [[TMP107]], i64* [[TMP114]], align 8
4945 // CHECK9-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
4946 // CHECK9-NEXT:    store i8* null, i8** [[TMP115]], align 8
4947 // CHECK9-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
4948 // CHECK9-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to double**
4949 // CHECK9-NEXT:    store double* [[TMP108]], double** [[TMP117]], align 8
4950 // CHECK9-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
4951 // CHECK9-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to double**
4952 // CHECK9-NEXT:    store double* [[TMP108]], double** [[TMP119]], align 8
4953 // CHECK9-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1
4954 // CHECK9-NEXT:    store i8* null, i8** [[TMP120]], align 8
4955 // CHECK9-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
4956 // CHECK9-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to double**
4957 // CHECK9-NEXT:    store double* [[TMP109]], double** [[TMP122]], align 8
4958 // CHECK9-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
4959 // CHECK9-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to double**
4960 // CHECK9-NEXT:    store double* [[TMP109]], double** [[TMP124]], align 8
4961 // CHECK9-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2
4962 // CHECK9-NEXT:    store i8* null, i8** [[TMP125]], align 8
4963 // CHECK9-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 3
4964 // CHECK9-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to double**
4965 // CHECK9-NEXT:    store double* [[TMP110]], double** [[TMP127]], align 8
4966 // CHECK9-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 3
4967 // CHECK9-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to double**
4968 // CHECK9-NEXT:    store double* [[TMP110]], double** [[TMP129]], align 8
4969 // CHECK9-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 3
4970 // CHECK9-NEXT:    store i8* null, i8** [[TMP130]], align 8
4971 // CHECK9-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
4972 // CHECK9-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
4973 // CHECK9-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
4974 // CHECK9-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_38]], align 4
4975 // CHECK9-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
4976 // CHECK9-NEXT:    [[SUB40:%.*]] = sub nsw i32 [[TMP134]], 0
4977 // CHECK9-NEXT:    [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
4978 // CHECK9-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
4979 // CHECK9-NEXT:    store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
4980 // CHECK9-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
4981 // CHECK9-NEXT:    [[ADD43:%.*]] = add nsw i32 [[TMP135]], 1
4982 // CHECK9-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD43]] to i64
4983 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
4984 // CHECK9-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
4985 // CHECK9-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
4986 // CHECK9-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
4987 // CHECK9:       omp_offload.failed44:
4988 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477(i64 [[TMP107]], double* [[TMP108]], double* [[TMP109]], double* [[TMP110]]) #[[ATTR2]]
4989 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT45]]
4990 // CHECK9:       omp_offload.cont45:
4991 // CHECK9-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
4992 // CHECK9-NEXT:    [[CONV47:%.*]] = bitcast i64* [[CH_CASTED46]] to i32*
4993 // CHECK9-NEXT:    store i32 [[TMP139]], i32* [[CONV47]], align 4
4994 // CHECK9-NEXT:    [[TMP140:%.*]] = load i64, i64* [[CH_CASTED46]], align 8
4995 // CHECK9-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
4996 // CHECK9-NEXT:    [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32*
4997 // CHECK9-NEXT:    store i32 [[TMP141]], i32* [[CONV49]], align 4
4998 // CHECK9-NEXT:    [[TMP142:%.*]] = load i64, i64* [[N_CASTED48]], align 8
4999 // CHECK9-NEXT:    [[TMP143:%.*]] = load double*, double** [[A]], align 8
5000 // CHECK9-NEXT:    [[TMP144:%.*]] = load double*, double** [[B]], align 8
5001 // CHECK9-NEXT:    [[TMP145:%.*]] = load double*, double** [[C]], align 8
5002 // CHECK9-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
5003 // CHECK9-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
5004 // CHECK9-NEXT:    store i64 [[TMP140]], i64* [[TMP147]], align 8
5005 // CHECK9-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
5006 // CHECK9-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64*
5007 // CHECK9-NEXT:    store i64 [[TMP140]], i64* [[TMP149]], align 8
5008 // CHECK9-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
5009 // CHECK9-NEXT:    store i8* null, i8** [[TMP150]], align 8
5010 // CHECK9-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
5011 // CHECK9-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i64*
5012 // CHECK9-NEXT:    store i64 [[TMP142]], i64* [[TMP152]], align 8
5013 // CHECK9-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
5014 // CHECK9-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i64*
5015 // CHECK9-NEXT:    store i64 [[TMP142]], i64* [[TMP154]], align 8
5016 // CHECK9-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
5017 // CHECK9-NEXT:    store i8* null, i8** [[TMP155]], align 8
5018 // CHECK9-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
5019 // CHECK9-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to double**
5020 // CHECK9-NEXT:    store double* [[TMP143]], double** [[TMP157]], align 8
5021 // CHECK9-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
5022 // CHECK9-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to double**
5023 // CHECK9-NEXT:    store double* [[TMP143]], double** [[TMP159]], align 8
5024 // CHECK9-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2
5025 // CHECK9-NEXT:    store i8* null, i8** [[TMP160]], align 8
5026 // CHECK9-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
5027 // CHECK9-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to double**
5028 // CHECK9-NEXT:    store double* [[TMP144]], double** [[TMP162]], align 8
5029 // CHECK9-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
5030 // CHECK9-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to double**
5031 // CHECK9-NEXT:    store double* [[TMP144]], double** [[TMP164]], align 8
5032 // CHECK9-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3
5033 // CHECK9-NEXT:    store i8* null, i8** [[TMP165]], align 8
5034 // CHECK9-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 4
5035 // CHECK9-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to double**
5036 // CHECK9-NEXT:    store double* [[TMP145]], double** [[TMP167]], align 8
5037 // CHECK9-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 4
5038 // CHECK9-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to double**
5039 // CHECK9-NEXT:    store double* [[TMP145]], double** [[TMP169]], align 8
5040 // CHECK9-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 4
5041 // CHECK9-NEXT:    store i8* null, i8** [[TMP170]], align 8
5042 // CHECK9-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
5043 // CHECK9-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
5044 // CHECK9-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
5045 // CHECK9-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_54]], align 4
5046 // CHECK9-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
5047 // CHECK9-NEXT:    [[SUB56:%.*]] = sub nsw i32 [[TMP174]], 0
5048 // CHECK9-NEXT:    [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
5049 // CHECK9-NEXT:    [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
5050 // CHECK9-NEXT:    store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
5051 // CHECK9-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
5052 // CHECK9-NEXT:    [[ADD59:%.*]] = add nsw i32 [[TMP175]], 1
5053 // CHECK9-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD59]] to i64
5054 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
5055 // CHECK9-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
5056 // CHECK9-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
5057 // CHECK9-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
5058 // CHECK9:       omp_offload.failed60:
5059 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505(i64 [[TMP140]], i64 [[TMP142]], double* [[TMP143]], double* [[TMP144]], double* [[TMP145]]) #[[ATTR2]]
5060 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT61]]
5061 // CHECK9:       omp_offload.cont61:
5062 // CHECK9-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
5063 // CHECK9-NEXT:    [[CONV63:%.*]] = bitcast i64* [[N_CASTED62]] to i32*
5064 // CHECK9-NEXT:    store i32 [[TMP179]], i32* [[CONV63]], align 4
5065 // CHECK9-NEXT:    [[TMP180:%.*]] = load i64, i64* [[N_CASTED62]], align 8
5066 // CHECK9-NEXT:    [[TMP181:%.*]] = load double*, double** [[A]], align 8
5067 // CHECK9-NEXT:    [[TMP182:%.*]] = load double*, double** [[B]], align 8
5068 // CHECK9-NEXT:    [[TMP183:%.*]] = load double*, double** [[C]], align 8
5069 // CHECK9-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
5070 // CHECK9-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i64*
5071 // CHECK9-NEXT:    store i64 [[TMP180]], i64* [[TMP185]], align 8
5072 // CHECK9-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
5073 // CHECK9-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i64*
5074 // CHECK9-NEXT:    store i64 [[TMP180]], i64* [[TMP187]], align 8
5075 // CHECK9-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 0
5076 // CHECK9-NEXT:    store i8* null, i8** [[TMP188]], align 8
5077 // CHECK9-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 1
5078 // CHECK9-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to double**
5079 // CHECK9-NEXT:    store double* [[TMP181]], double** [[TMP190]], align 8
5080 // CHECK9-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 1
5081 // CHECK9-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to double**
5082 // CHECK9-NEXT:    store double* [[TMP181]], double** [[TMP192]], align 8
5083 // CHECK9-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 1
5084 // CHECK9-NEXT:    store i8* null, i8** [[TMP193]], align 8
5085 // CHECK9-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 2
5086 // CHECK9-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to double**
5087 // CHECK9-NEXT:    store double* [[TMP182]], double** [[TMP195]], align 8
5088 // CHECK9-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 2
5089 // CHECK9-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to double**
5090 // CHECK9-NEXT:    store double* [[TMP182]], double** [[TMP197]], align 8
5091 // CHECK9-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 2
5092 // CHECK9-NEXT:    store i8* null, i8** [[TMP198]], align 8
5093 // CHECK9-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 3
5094 // CHECK9-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to double**
5095 // CHECK9-NEXT:    store double* [[TMP183]], double** [[TMP200]], align 8
5096 // CHECK9-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 3
5097 // CHECK9-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to double**
5098 // CHECK9-NEXT:    store double* [[TMP183]], double** [[TMP202]], align 8
5099 // CHECK9-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 3
5100 // CHECK9-NEXT:    store i8* null, i8** [[TMP203]], align 8
5101 // CHECK9-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
5102 // CHECK9-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
5103 // CHECK9-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
5104 // CHECK9-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_68]], align 4
5105 // CHECK9-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
5106 // CHECK9-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP207]], 0
5107 // CHECK9-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
5108 // CHECK9-NEXT:    [[SUB72:%.*]] = sub nsw i32 [[DIV71]], 1
5109 // CHECK9-NEXT:    store i32 [[SUB72]], i32* [[DOTCAPTURE_EXPR_69]], align 4
5110 // CHECK9-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_69]], align 4
5111 // CHECK9-NEXT:    [[ADD73:%.*]] = add nsw i32 [[TMP208]], 1
5112 // CHECK9-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD73]] to i64
5113 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
5114 // CHECK9-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
5115 // CHECK9-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
5116 // CHECK9-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED74:%.*]], label [[OMP_OFFLOAD_CONT75:%.*]]
5117 // CHECK9:       omp_offload.failed74:
5118 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535(i64 [[TMP180]], double* [[TMP181]], double* [[TMP182]], double* [[TMP183]]) #[[ATTR2]]
5119 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT75]]
5120 // CHECK9:       omp_offload.cont75:
5121 // CHECK9-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
5122 // CHECK9-NEXT:    [[CONV77:%.*]] = bitcast i64* [[CH_CASTED76]] to i32*
5123 // CHECK9-NEXT:    store i32 [[TMP212]], i32* [[CONV77]], align 4
5124 // CHECK9-NEXT:    [[TMP213:%.*]] = load i64, i64* [[CH_CASTED76]], align 8
5125 // CHECK9-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
5126 // CHECK9-NEXT:    [[CONV79:%.*]] = bitcast i64* [[N_CASTED78]] to i32*
5127 // CHECK9-NEXT:    store i32 [[TMP214]], i32* [[CONV79]], align 4
5128 // CHECK9-NEXT:    [[TMP215:%.*]] = load i64, i64* [[N_CASTED78]], align 8
5129 // CHECK9-NEXT:    [[TMP216:%.*]] = load double*, double** [[A]], align 8
5130 // CHECK9-NEXT:    [[TMP217:%.*]] = load double*, double** [[B]], align 8
5131 // CHECK9-NEXT:    [[TMP218:%.*]] = load double*, double** [[C]], align 8
5132 // CHECK9-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
5133 // CHECK9-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i64*
5134 // CHECK9-NEXT:    store i64 [[TMP213]], i64* [[TMP220]], align 8
5135 // CHECK9-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
5136 // CHECK9-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i64*
5137 // CHECK9-NEXT:    store i64 [[TMP213]], i64* [[TMP222]], align 8
5138 // CHECK9-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 0
5139 // CHECK9-NEXT:    store i8* null, i8** [[TMP223]], align 8
5140 // CHECK9-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 1
5141 // CHECK9-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i64*
5142 // CHECK9-NEXT:    store i64 [[TMP215]], i64* [[TMP225]], align 8
5143 // CHECK9-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 1
5144 // CHECK9-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i64*
5145 // CHECK9-NEXT:    store i64 [[TMP215]], i64* [[TMP227]], align 8
5146 // CHECK9-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 1
5147 // CHECK9-NEXT:    store i8* null, i8** [[TMP228]], align 8
5148 // CHECK9-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 2
5149 // CHECK9-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to double**
5150 // CHECK9-NEXT:    store double* [[TMP216]], double** [[TMP230]], align 8
5151 // CHECK9-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 2
5152 // CHECK9-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to double**
5153 // CHECK9-NEXT:    store double* [[TMP216]], double** [[TMP232]], align 8
5154 // CHECK9-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 2
5155 // CHECK9-NEXT:    store i8* null, i8** [[TMP233]], align 8
5156 // CHECK9-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 3
5157 // CHECK9-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to double**
5158 // CHECK9-NEXT:    store double* [[TMP217]], double** [[TMP235]], align 8
5159 // CHECK9-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 3
5160 // CHECK9-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to double**
5161 // CHECK9-NEXT:    store double* [[TMP217]], double** [[TMP237]], align 8
5162 // CHECK9-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 3
5163 // CHECK9-NEXT:    store i8* null, i8** [[TMP238]], align 8
5164 // CHECK9-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 4
5165 // CHECK9-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to double**
5166 // CHECK9-NEXT:    store double* [[TMP218]], double** [[TMP240]], align 8
5167 // CHECK9-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 4
5168 // CHECK9-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to double**
5169 // CHECK9-NEXT:    store double* [[TMP218]], double** [[TMP242]], align 8
5170 // CHECK9-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 4
5171 // CHECK9-NEXT:    store i8* null, i8** [[TMP243]], align 8
5172 // CHECK9-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
5173 // CHECK9-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
5174 // CHECK9-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
5175 // CHECK9-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_84]], align 4
5176 // CHECK9-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
5177 // CHECK9-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP247]], 0
5178 // CHECK9-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
5179 // CHECK9-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
5180 // CHECK9-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
5181 // CHECK9-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
5182 // CHECK9-NEXT:    [[ADD89:%.*]] = add nsw i32 [[TMP248]], 1
5183 // CHECK9-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD89]] to i64
5184 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
5185 // CHECK9-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
5186 // CHECK9-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
5187 // CHECK9-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED90:%.*]], label [[OMP_OFFLOAD_CONT91:%.*]]
5188 // CHECK9:       omp_offload.failed90:
5189 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561(i64 [[TMP213]], i64 [[TMP215]], double* [[TMP216]], double* [[TMP217]], double* [[TMP218]]) #[[ATTR2]]
5190 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT91]]
5191 // CHECK9:       omp_offload.cont91:
5192 // CHECK9-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v()
5193 // CHECK9-NEXT:    ret i32 [[CALL]]
5194 //
5195 //
5196 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368
5197 // CHECK9-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1:[0-9]+]] {
5198 // CHECK9-NEXT:  entry:
5199 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
5200 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
5201 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
5202 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
5203 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
5204 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
5205 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
5206 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
5207 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
5208 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
5209 // CHECK9-NEXT:    ret void
5210 //
5211 //
5212 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
5213 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
5214 // CHECK9-NEXT:  entry:
5215 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5216 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5217 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
5218 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
5219 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
5220 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
5221 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5222 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5223 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5224 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5225 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
5226 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5227 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5228 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5229 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5230 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
5231 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5232 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5233 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
5234 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
5235 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
5236 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
5237 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
5238 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
5239 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
5240 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
5241 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5242 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5243 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5244 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5245 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5246 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5247 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5248 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
5249 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5250 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5251 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5252 // CHECK9:       omp.precond.then:
5253 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5254 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5255 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
5256 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5257 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5258 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5259 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
5260 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5261 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5262 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5263 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
5264 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5265 // CHECK9:       cond.true:
5266 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5267 // CHECK9-NEXT:    br label [[COND_END:%.*]]
5268 // CHECK9:       cond.false:
5269 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5270 // CHECK9-NEXT:    br label [[COND_END]]
5271 // CHECK9:       cond.end:
5272 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
5273 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5274 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5275 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
5276 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5277 // CHECK9:       omp.inner.for.cond:
5278 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
5279 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
5280 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
5281 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5282 // CHECK9:       omp.inner.for.body:
5283 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !17
5284 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
5285 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !17
5286 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
5287 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !17
5288 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5289 // CHECK9:       omp.inner.for.inc:
5290 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
5291 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !17
5292 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
5293 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !17
5294 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
5295 // CHECK9:       omp.inner.for.end:
5296 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5297 // CHECK9:       omp.loop.exit:
5298 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5299 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
5300 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
5301 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5302 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
5303 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5304 // CHECK9:       .omp.final.then:
5305 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5306 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
5307 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
5308 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
5309 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
5310 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
5311 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5312 // CHECK9:       .omp.final.done:
5313 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
5314 // CHECK9:       omp.precond.end:
5315 // CHECK9-NEXT:    ret void
5316 //
5317 //
5318 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..1
5319 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
5320 // CHECK9-NEXT:  entry:
5321 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5322 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5323 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5324 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5325 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
5326 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
5327 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
5328 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
5329 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5330 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5331 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5332 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5333 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
5334 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5335 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5336 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5337 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5338 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
5339 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5340 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5341 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5342 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5343 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
5344 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
5345 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
5346 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
5347 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
5348 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
5349 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
5350 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
5351 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5352 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5353 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5354 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5355 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5356 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5357 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5358 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
5359 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5360 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5361 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5362 // CHECK9:       omp.precond.then:
5363 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5364 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5365 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
5366 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5367 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
5368 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5369 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
5370 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
5371 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
5372 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5373 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5374 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5375 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5376 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5377 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5378 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5379 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5380 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5381 // CHECK9:       cond.true:
5382 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5383 // CHECK9-NEXT:    br label [[COND_END:%.*]]
5384 // CHECK9:       cond.false:
5385 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5386 // CHECK9-NEXT:    br label [[COND_END]]
5387 // CHECK9:       cond.end:
5388 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5389 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5390 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5391 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5392 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5393 // CHECK9:       omp.inner.for.cond:
5394 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
5395 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
5396 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
5397 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5398 // CHECK9:       omp.inner.for.body:
5399 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
5400 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
5401 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5402 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !21
5403 // CHECK9-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !21
5404 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
5405 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
5406 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
5407 // CHECK9-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !21
5408 // CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !21
5409 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
5410 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
5411 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
5412 // CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !21
5413 // CHECK9-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
5414 // CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !21
5415 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !21
5416 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
5417 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
5418 // CHECK9-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !21
5419 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5420 // CHECK9:       omp.body.continue:
5421 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5422 // CHECK9:       omp.inner.for.inc:
5423 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
5424 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
5425 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
5426 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
5427 // CHECK9:       omp.inner.for.end:
5428 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5429 // CHECK9:       omp.loop.exit:
5430 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5431 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
5432 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
5433 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5434 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
5435 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5436 // CHECK9:       .omp.final.then:
5437 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5438 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
5439 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
5440 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
5441 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
5442 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
5443 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5444 // CHECK9:       .omp.final.done:
5445 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
5446 // CHECK9:       omp.precond.end:
5447 // CHECK9-NEXT:    ret void
5448 //
5449 //
5450 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407
5451 // CHECK9-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
5452 // CHECK9-NEXT:  entry:
5453 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
5454 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
5455 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
5456 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
5457 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
5458 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
5459 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
5460 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
5461 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
5462 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
5463 // CHECK9-NEXT:    ret void
5464 //
5465 //
5466 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..2
5467 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
5468 // CHECK9-NEXT:  entry:
5469 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5470 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5471 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
5472 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
5473 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
5474 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
5475 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5476 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5477 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5478 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5479 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
5480 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5481 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5482 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5483 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5484 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
5485 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5486 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5487 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
5488 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
5489 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
5490 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
5491 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
5492 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
5493 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
5494 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
5495 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5496 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5497 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5498 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5499 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5500 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5501 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5502 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
5503 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5504 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5505 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5506 // CHECK9:       omp.precond.then:
5507 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5508 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5509 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
5510 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5511 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5512 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5513 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
5514 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5515 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5516 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5517 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
5518 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5519 // CHECK9:       cond.true:
5520 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5521 // CHECK9-NEXT:    br label [[COND_END:%.*]]
5522 // CHECK9:       cond.false:
5523 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5524 // CHECK9-NEXT:    br label [[COND_END]]
5525 // CHECK9:       cond.end:
5526 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
5527 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5528 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5529 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
5530 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5531 // CHECK9:       omp.inner.for.cond:
5532 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5533 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5534 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
5535 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5536 // CHECK9:       omp.inner.for.body:
5537 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26
5538 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
5539 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26
5540 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
5541 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !26
5542 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5543 // CHECK9:       omp.inner.for.inc:
5544 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5545 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26
5546 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
5547 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26
5548 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
5549 // CHECK9:       omp.inner.for.end:
5550 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5551 // CHECK9:       omp.loop.exit:
5552 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5553 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
5554 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
5555 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5556 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
5557 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5558 // CHECK9:       .omp.final.then:
5559 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5560 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
5561 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
5562 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
5563 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
5564 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
5565 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5566 // CHECK9:       .omp.final.done:
5567 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
5568 // CHECK9:       omp.precond.end:
5569 // CHECK9-NEXT:    ret void
5570 //
5571 //
5572 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..3
5573 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
5574 // CHECK9-NEXT:  entry:
5575 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5576 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5577 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5578 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5579 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
5580 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
5581 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
5582 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
5583 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5584 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5585 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5586 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5587 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
5588 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5589 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5590 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5591 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5592 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
5593 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5594 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5595 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5596 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5597 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
5598 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
5599 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
5600 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
5601 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
5602 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
5603 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
5604 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
5605 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5606 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5607 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5608 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5609 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5610 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5611 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5612 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
5613 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5614 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5615 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5616 // CHECK9:       omp.precond.then:
5617 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5618 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5619 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
5620 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5621 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
5622 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5623 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
5624 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
5625 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
5626 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5627 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5628 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5629 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5630 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5631 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5632 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5633 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5634 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5635 // CHECK9:       cond.true:
5636 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5637 // CHECK9-NEXT:    br label [[COND_END:%.*]]
5638 // CHECK9:       cond.false:
5639 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5640 // CHECK9-NEXT:    br label [[COND_END]]
5641 // CHECK9:       cond.end:
5642 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5643 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5644 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5645 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5646 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5647 // CHECK9:       omp.inner.for.cond:
5648 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5649 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29
5650 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
5651 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5652 // CHECK9:       omp.inner.for.body:
5653 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5654 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
5655 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5656 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !29
5657 // CHECK9-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !29
5658 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
5659 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
5660 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
5661 // CHECK9-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !29
5662 // CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !29
5663 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
5664 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
5665 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
5666 // CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !29
5667 // CHECK9-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
5668 // CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !29
5669 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !29
5670 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
5671 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
5672 // CHECK9-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !29
5673 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5674 // CHECK9:       omp.body.continue:
5675 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5676 // CHECK9:       omp.inner.for.inc:
5677 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5678 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
5679 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29
5680 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
5681 // CHECK9:       omp.inner.for.end:
5682 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5683 // CHECK9:       omp.loop.exit:
5684 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5685 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
5686 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
5687 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5688 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
5689 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5690 // CHECK9:       .omp.final.then:
5691 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5692 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
5693 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
5694 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
5695 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
5696 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
5697 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5698 // CHECK9:       .omp.final.done:
5699 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
5700 // CHECK9:       omp.precond.end:
5701 // CHECK9-NEXT:    ret void
5702 //
5703 //
5704 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446
5705 // CHECK9-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
5706 // CHECK9-NEXT:  entry:
5707 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
5708 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
5709 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
5710 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
5711 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
5712 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
5713 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
5714 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
5715 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
5716 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
5717 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
5718 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
5719 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
5720 // CHECK9-NEXT:    ret void
5721 //
5722 //
5723 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6
5724 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
5725 // CHECK9-NEXT:  entry:
5726 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5727 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5728 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
5729 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
5730 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
5731 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
5732 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
5733 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5734 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5735 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5736 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5737 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
5738 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5739 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5740 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5741 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5742 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
5743 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5744 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5745 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
5746 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
5747 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
5748 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
5749 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
5750 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
5751 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
5752 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
5753 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
5754 // CHECK9-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
5755 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
5756 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
5757 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5758 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
5759 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5760 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5761 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5762 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
5763 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5764 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
5765 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5766 // CHECK9:       omp.precond.then:
5767 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
5768 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5769 // CHECK9-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
5770 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5771 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5772 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
5773 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5774 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5775 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
5776 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5777 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5778 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5779 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5780 // CHECK9:       cond.true:
5781 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5782 // CHECK9-NEXT:    br label [[COND_END:%.*]]
5783 // CHECK9:       cond.false:
5784 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
5785 // CHECK9-NEXT:    br label [[COND_END]]
5786 // CHECK9:       cond.end:
5787 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5788 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
5789 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
5790 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5791 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5792 // CHECK9:       omp.inner.for.cond:
5793 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5794 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
5795 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
5796 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
5797 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5798 // CHECK9:       omp.inner.for.body:
5799 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
5800 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
5801 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5802 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
5803 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !32
5804 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5805 // CHECK9:       omp.inner.for.inc:
5806 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5807 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
5808 // CHECK9-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
5809 // CHECK9-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5810 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
5811 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
5812 // CHECK9-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
5813 // CHECK9-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
5814 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5815 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32
5816 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
5817 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5818 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5819 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
5820 // CHECK9-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
5821 // CHECK9-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
5822 // CHECK9:       cond.true10:
5823 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !32
5824 // CHECK9-NEXT:    br label [[COND_END12:%.*]]
5825 // CHECK9:       cond.false11:
5826 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5827 // CHECK9-NEXT:    br label [[COND_END12]]
5828 // CHECK9:       cond.end12:
5829 // CHECK9-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
5830 // CHECK9-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32
5831 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32
5832 // CHECK9-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32
5833 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
5834 // CHECK9:       omp.inner.for.end:
5835 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5836 // CHECK9:       omp.loop.exit:
5837 // CHECK9-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5838 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
5839 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
5840 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5841 // CHECK9-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
5842 // CHECK9-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5843 // CHECK9:       .omp.final.then:
5844 // CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5845 // CHECK9-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
5846 // CHECK9-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
5847 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
5848 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
5849 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
5850 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5851 // CHECK9:       .omp.final.done:
5852 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
5853 // CHECK9:       omp.precond.end:
5854 // CHECK9-NEXT:    ret void
5855 //
5856 //
5857 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7
5858 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
5859 // CHECK9-NEXT:  entry:
5860 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
5861 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
5862 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5863 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5864 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
5865 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
5866 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
5867 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
5868 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
5869 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
5870 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5871 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5872 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
5873 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
5874 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
5875 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5876 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5877 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
5878 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
5879 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
5880 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5881 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5882 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
5883 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
5884 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
5885 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
5886 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
5887 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
5888 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
5889 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
5890 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
5891 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
5892 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5893 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
5894 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5895 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5896 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
5897 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
5898 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5899 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
5900 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5901 // CHECK9:       omp.precond.then:
5902 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
5903 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5904 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
5905 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
5906 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
5907 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
5908 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
5909 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
5910 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
5911 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
5912 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
5913 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5914 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
5915 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
5916 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5917 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5918 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
5919 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5920 // CHECK9:       cond.true:
5921 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
5922 // CHECK9-NEXT:    br label [[COND_END:%.*]]
5923 // CHECK9:       cond.false:
5924 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
5925 // CHECK9-NEXT:    br label [[COND_END]]
5926 // CHECK9:       cond.end:
5927 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
5928 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
5929 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
5930 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
5931 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
5932 // CHECK9:       omp.inner.for.cond:
5933 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5934 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35
5935 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
5936 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5937 // CHECK9:       omp.inner.for.body:
5938 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5939 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
5940 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5941 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !35
5942 // CHECK9-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !35
5943 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
5944 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
5945 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
5946 // CHECK9-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !35
5947 // CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !35
5948 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
5949 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
5950 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
5951 // CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !35
5952 // CHECK9-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
5953 // CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !35
5954 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !35
5955 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
5956 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
5957 // CHECK9-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !35
5958 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
5959 // CHECK9:       omp.body.continue:
5960 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
5961 // CHECK9:       omp.inner.for.inc:
5962 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5963 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
5964 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35
5965 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
5966 // CHECK9:       omp.inner.for.end:
5967 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
5968 // CHECK9:       omp.loop.exit:
5969 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
5970 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
5971 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
5972 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
5973 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
5974 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5975 // CHECK9:       .omp.final.then:
5976 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
5977 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
5978 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
5979 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
5980 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
5981 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
5982 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
5983 // CHECK9:       .omp.final.done:
5984 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
5985 // CHECK9:       omp.precond.end:
5986 // CHECK9-NEXT:    ret void
5987 //
5988 //
5989 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477
5990 // CHECK9-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
5991 // CHECK9-NEXT:  entry:
5992 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
5993 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
5994 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
5995 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
5996 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
5997 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
5998 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
5999 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
6000 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
6001 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6002 // CHECK9-NEXT:    ret void
6003 //
6004 //
6005 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10
6006 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
6007 // CHECK9-NEXT:  entry:
6008 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6009 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6010 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
6011 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
6012 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
6013 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
6014 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6015 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6016 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6017 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6018 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6019 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6020 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6021 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6022 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6023 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
6024 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6025 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6026 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
6027 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
6028 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
6029 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
6030 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6031 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
6032 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
6033 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
6034 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6035 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6036 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6037 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6038 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6039 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6040 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6041 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
6042 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6043 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6044 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6045 // CHECK9:       omp.precond.then:
6046 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6047 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6048 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
6049 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6050 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6051 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6052 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
6053 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6054 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6055 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6056 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
6057 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6058 // CHECK9:       cond.true:
6059 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6060 // CHECK9-NEXT:    br label [[COND_END:%.*]]
6061 // CHECK9:       cond.false:
6062 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6063 // CHECK9-NEXT:    br label [[COND_END]]
6064 // CHECK9:       cond.end:
6065 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
6066 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6067 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6068 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
6069 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6070 // CHECK9:       omp.inner.for.cond:
6071 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
6072 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
6073 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
6074 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6075 // CHECK9:       omp.inner.for.body:
6076 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !38
6077 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
6078 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38
6079 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
6080 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !38
6081 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6082 // CHECK9:       omp.inner.for.inc:
6083 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
6084 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !38
6085 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
6086 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38
6087 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
6088 // CHECK9:       omp.inner.for.end:
6089 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6090 // CHECK9:       omp.loop.exit:
6091 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6092 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
6093 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
6094 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6095 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
6096 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6097 // CHECK9:       .omp.final.then:
6098 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6099 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
6100 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
6101 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
6102 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
6103 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
6104 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6105 // CHECK9:       .omp.final.done:
6106 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
6107 // CHECK9:       omp.precond.end:
6108 // CHECK9-NEXT:    ret void
6109 //
6110 //
6111 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..11
6112 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
6113 // CHECK9-NEXT:  entry:
6114 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6115 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6116 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6117 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6118 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
6119 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
6120 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
6121 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
6122 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6123 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6124 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6125 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6126 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6127 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6128 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6129 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6130 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6131 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
6132 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6133 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6134 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6135 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6136 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
6137 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
6138 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
6139 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
6140 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6141 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
6142 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
6143 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
6144 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6145 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6146 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6147 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6148 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6149 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6150 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6151 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
6152 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6153 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6154 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6155 // CHECK9:       omp.precond.then:
6156 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6157 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6158 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
6159 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6160 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
6161 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6162 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
6163 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
6164 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
6165 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6166 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6167 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6168 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
6169 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6170 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6171 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6172 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
6173 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6174 // CHECK9:       cond.true:
6175 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6176 // CHECK9-NEXT:    br label [[COND_END:%.*]]
6177 // CHECK9:       cond.false:
6178 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6179 // CHECK9-NEXT:    br label [[COND_END]]
6180 // CHECK9:       cond.end:
6181 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
6182 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6183 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6184 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
6185 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6186 // CHECK9:       omp.inner.for.cond:
6187 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
6188 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41
6189 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
6190 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6191 // CHECK9:       omp.inner.for.body:
6192 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
6193 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
6194 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6195 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !41
6196 // CHECK9-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !41
6197 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
6198 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
6199 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i64 [[IDXPROM]]
6200 // CHECK9-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !41
6201 // CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !41
6202 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
6203 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
6204 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM7]]
6205 // CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX8]], align 8, !llvm.access.group !41
6206 // CHECK9-NEXT:    [[ADD9:%.*]] = fadd double [[TMP22]], [[TMP25]]
6207 // CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !41
6208 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !41
6209 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
6210 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM10]]
6211 // CHECK9-NEXT:    store double [[ADD9]], double* [[ARRAYIDX11]], align 8, !llvm.access.group !41
6212 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6213 // CHECK9:       omp.body.continue:
6214 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6215 // CHECK9:       omp.inner.for.inc:
6216 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
6217 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
6218 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41
6219 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
6220 // CHECK9:       omp.inner.for.end:
6221 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6222 // CHECK9:       omp.loop.exit:
6223 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6224 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
6225 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
6226 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6227 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
6228 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6229 // CHECK9:       .omp.final.then:
6230 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6231 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
6232 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
6233 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
6234 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
6235 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
6236 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6237 // CHECK9:       .omp.final.done:
6238 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
6239 // CHECK9:       omp.precond.end:
6240 // CHECK9-NEXT:    ret void
6241 //
6242 //
6243 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505
6244 // CHECK9-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
6245 // CHECK9-NEXT:  entry:
6246 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
6247 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
6248 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
6249 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
6250 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
6251 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
6252 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
6253 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
6254 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
6255 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
6256 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
6257 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
6258 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6259 // CHECK9-NEXT:    ret void
6260 //
6261 //
6262 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..14
6263 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
6264 // CHECK9-NEXT:  entry:
6265 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6266 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6267 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
6268 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
6269 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
6270 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
6271 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
6272 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6273 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6274 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6275 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6276 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6277 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6278 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6279 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6280 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6281 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6282 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
6283 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
6284 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6285 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6286 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
6287 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
6288 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
6289 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
6290 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
6291 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
6292 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6293 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
6294 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
6295 // CHECK9-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
6296 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
6297 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
6298 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
6299 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6300 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6301 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
6302 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6303 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6304 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
6305 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
6306 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6307 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
6308 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6309 // CHECK9:       omp.precond.then:
6310 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6311 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6312 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
6313 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6314 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6315 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6316 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
6317 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6318 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6319 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6320 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
6321 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6322 // CHECK9:       cond.true:
6323 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6324 // CHECK9-NEXT:    br label [[COND_END:%.*]]
6325 // CHECK9:       cond.false:
6326 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6327 // CHECK9-NEXT:    br label [[COND_END]]
6328 // CHECK9:       cond.end:
6329 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
6330 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6331 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6332 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
6333 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6334 // CHECK9:       omp.inner.for.cond:
6335 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
6336 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
6337 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
6338 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6339 // CHECK9:       omp.inner.for.body:
6340 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !44
6341 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
6342 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44
6343 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
6344 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !44
6345 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
6346 // CHECK9-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !44
6347 // CHECK9-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !44
6348 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !44
6349 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6350 // CHECK9:       omp.inner.for.inc:
6351 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
6352 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !44
6353 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
6354 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44
6355 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
6356 // CHECK9:       omp.inner.for.end:
6357 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6358 // CHECK9:       omp.loop.exit:
6359 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6360 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
6361 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
6362 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6363 // CHECK9-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
6364 // CHECK9-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6365 // CHECK9:       .omp.final.then:
6366 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6367 // CHECK9-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
6368 // CHECK9-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
6369 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
6370 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
6371 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
6372 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6373 // CHECK9:       .omp.final.done:
6374 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
6375 // CHECK9:       omp.precond.end:
6376 // CHECK9-NEXT:    ret void
6377 //
6378 //
6379 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..15
6380 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
6381 // CHECK9-NEXT:  entry:
6382 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6383 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6384 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6385 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6386 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
6387 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
6388 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
6389 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
6390 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
6391 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6392 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6393 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6394 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6395 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6396 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6397 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6398 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6399 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6400 // CHECK9-NEXT:    [[I6:%.*]] = alloca i32, align 4
6401 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6402 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6403 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6404 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6405 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
6406 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
6407 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
6408 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
6409 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
6410 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6411 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
6412 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
6413 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
6414 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
6415 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6416 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6417 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6418 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6419 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6420 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6421 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
6422 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
6423 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6424 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6425 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6426 // CHECK9:       omp.precond.then:
6427 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6428 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6429 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
6430 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6431 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
6432 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6433 // CHECK9-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
6434 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
6435 // CHECK9-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
6436 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6437 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6438 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4
6439 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6440 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
6441 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
6442 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
6443 // CHECK9:       omp.dispatch.cond:
6444 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6445 // CHECK9-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6446 // CHECK9-NEXT:    [[CONV7:%.*]] = trunc i64 [[TMP14]] to i32
6447 // CHECK9-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP13]], [[CONV7]]
6448 // CHECK9-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6449 // CHECK9:       cond.true:
6450 // CHECK9-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6451 // CHECK9-NEXT:    [[CONV9:%.*]] = trunc i64 [[TMP15]] to i32
6452 // CHECK9-NEXT:    br label [[COND_END:%.*]]
6453 // CHECK9:       cond.false:
6454 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6455 // CHECK9-NEXT:    br label [[COND_END]]
6456 // CHECK9:       cond.end:
6457 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV9]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
6458 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
6459 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6460 // CHECK9-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
6461 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
6462 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6463 // CHECK9-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
6464 // CHECK9-NEXT:    br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6465 // CHECK9:       omp.dispatch.body:
6466 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6467 // CHECK9:       omp.inner.for.cond:
6468 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6469 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47
6470 // CHECK9-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
6471 // CHECK9-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6472 // CHECK9:       omp.inner.for.body:
6473 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6474 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
6475 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6476 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !47
6477 // CHECK9-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !47
6478 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
6479 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
6480 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i64 [[IDXPROM]]
6481 // CHECK9-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !47
6482 // CHECK9-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !47
6483 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
6484 // CHECK9-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP27]] to i64
6485 // CHECK9-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM12]]
6486 // CHECK9-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX13]], align 8, !llvm.access.group !47
6487 // CHECK9-NEXT:    [[ADD14:%.*]] = fadd double [[TMP25]], [[TMP28]]
6488 // CHECK9-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !47
6489 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !47
6490 // CHECK9-NEXT:    [[IDXPROM15:%.*]] = sext i32 [[TMP30]] to i64
6491 // CHECK9-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM15]]
6492 // CHECK9-NEXT:    store double [[ADD14]], double* [[ARRAYIDX16]], align 8, !llvm.access.group !47
6493 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6494 // CHECK9:       omp.body.continue:
6495 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6496 // CHECK9:       omp.inner.for.inc:
6497 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6498 // CHECK9-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP31]], 1
6499 // CHECK9-NEXT:    store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47
6500 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
6501 // CHECK9:       omp.inner.for.end:
6502 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
6503 // CHECK9:       omp.dispatch.inc:
6504 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6505 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6506 // CHECK9-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
6507 // CHECK9-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_LB]], align 4
6508 // CHECK9-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6509 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
6510 // CHECK9-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
6511 // CHECK9-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_UB]], align 4
6512 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
6513 // CHECK9:       omp.dispatch.end:
6514 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6515 // CHECK9-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
6516 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
6517 // CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6518 // CHECK9-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
6519 // CHECK9-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6520 // CHECK9:       .omp.final.then:
6521 // CHECK9-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6522 // CHECK9-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[TMP40]], 0
6523 // CHECK9-NEXT:    [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
6524 // CHECK9-NEXT:    [[MUL22:%.*]] = mul nsw i32 [[DIV21]], 1
6525 // CHECK9-NEXT:    [[ADD23:%.*]] = add nsw i32 0, [[MUL22]]
6526 // CHECK9-NEXT:    store i32 [[ADD23]], i32* [[I6]], align 4
6527 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6528 // CHECK9:       .omp.final.done:
6529 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
6530 // CHECK9:       omp.precond.end:
6531 // CHECK9-NEXT:    ret void
6532 //
6533 //
6534 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535
6535 // CHECK9-SAME: (i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
6536 // CHECK9-NEXT:  entry:
6537 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
6538 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
6539 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
6540 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
6541 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
6542 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
6543 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
6544 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
6545 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
6546 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[CONV]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6547 // CHECK9-NEXT:    ret void
6548 //
6549 //
6550 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..18
6551 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
6552 // CHECK9-NEXT:  entry:
6553 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6554 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6555 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
6556 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
6557 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
6558 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
6559 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6560 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6561 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6562 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6563 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6564 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6565 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6566 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6567 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6568 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
6569 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6570 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6571 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
6572 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
6573 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
6574 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
6575 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6576 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
6577 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
6578 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
6579 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6580 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6581 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6582 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6583 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6584 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6585 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6586 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
6587 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6588 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6589 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6590 // CHECK9:       omp.precond.then:
6591 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6592 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6593 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
6594 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6595 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6596 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6597 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
6598 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6599 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6600 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6601 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
6602 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6603 // CHECK9:       cond.true:
6604 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6605 // CHECK9-NEXT:    br label [[COND_END:%.*]]
6606 // CHECK9:       cond.false:
6607 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6608 // CHECK9-NEXT:    br label [[COND_END]]
6609 // CHECK9:       cond.end:
6610 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
6611 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6612 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6613 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
6614 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6615 // CHECK9:       omp.inner.for.cond:
6616 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
6617 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
6618 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
6619 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6620 // CHECK9:       omp.inner.for.body:
6621 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !50
6622 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
6623 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50
6624 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
6625 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !50
6626 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6627 // CHECK9:       omp.inner.for.inc:
6628 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
6629 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !50
6630 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
6631 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50
6632 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
6633 // CHECK9:       omp.inner.for.end:
6634 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6635 // CHECK9:       omp.loop.exit:
6636 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6637 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
6638 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
6639 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6640 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
6641 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6642 // CHECK9:       .omp.final.then:
6643 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6644 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
6645 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
6646 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
6647 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
6648 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
6649 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6650 // CHECK9:       .omp.final.done:
6651 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
6652 // CHECK9:       omp.precond.end:
6653 // CHECK9-NEXT:    ret void
6654 //
6655 //
6656 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..19
6657 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
6658 // CHECK9-NEXT:  entry:
6659 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6660 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6661 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6662 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6663 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
6664 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
6665 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
6666 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
6667 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6668 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6669 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6670 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6671 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6672 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6673 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6674 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6675 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6676 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
6677 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6678 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6679 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6680 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6681 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
6682 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
6683 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
6684 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
6685 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6686 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
6687 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
6688 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
6689 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6690 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
6691 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6692 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6693 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6694 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6695 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6696 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
6697 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6698 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6699 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6700 // CHECK9:       omp.precond.then:
6701 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6702 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6703 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
6704 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6705 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
6706 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6707 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
6708 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
6709 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
6710 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6711 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6712 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6713 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6714 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6715 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
6716 // CHECK9-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
6717 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
6718 // CHECK9:       omp.dispatch.cond:
6719 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6720 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
6721 // CHECK9-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
6722 // CHECK9-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
6723 // CHECK9-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6724 // CHECK9:       omp.dispatch.body:
6725 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6726 // CHECK9-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
6727 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6728 // CHECK9:       omp.inner.for.cond:
6729 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6730 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !53
6731 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
6732 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6733 // CHECK9:       omp.inner.for.body:
6734 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6735 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
6736 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6737 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !53
6738 // CHECK9-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !53
6739 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
6740 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
6741 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i64 [[IDXPROM]]
6742 // CHECK9-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !53
6743 // CHECK9-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !53
6744 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
6745 // CHECK9-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
6746 // CHECK9-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP24]], i64 [[IDXPROM6]]
6747 // CHECK9-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX7]], align 8, !llvm.access.group !53
6748 // CHECK9-NEXT:    [[ADD8:%.*]] = fadd double [[TMP23]], [[TMP26]]
6749 // CHECK9-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !53
6750 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !53
6751 // CHECK9-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
6752 // CHECK9-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP27]], i64 [[IDXPROM9]]
6753 // CHECK9-NEXT:    store double [[ADD8]], double* [[ARRAYIDX10]], align 8, !llvm.access.group !53
6754 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
6755 // CHECK9:       omp.body.continue:
6756 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6757 // CHECK9:       omp.inner.for.inc:
6758 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6759 // CHECK9-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP29]], 1
6760 // CHECK9-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53
6761 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
6762 // CHECK9:       omp.inner.for.end:
6763 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
6764 // CHECK9:       omp.dispatch.inc:
6765 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
6766 // CHECK9:       omp.dispatch.end:
6767 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6768 // CHECK9-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
6769 // CHECK9-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6770 // CHECK9:       .omp.final.then:
6771 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
6772 // CHECK9-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP32]], 0
6773 // CHECK9-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
6774 // CHECK9-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
6775 // CHECK9-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
6776 // CHECK9-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
6777 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6778 // CHECK9:       .omp.final.done:
6779 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
6780 // CHECK9:       omp.precond.end:
6781 // CHECK9-NEXT:    ret void
6782 //
6783 //
6784 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561
6785 // CHECK9-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
6786 // CHECK9-NEXT:  entry:
6787 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
6788 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
6789 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
6790 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 8
6791 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 8
6792 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
6793 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
6794 // CHECK9-NEXT:    store double* [[A]], double** [[A_ADDR]], align 8
6795 // CHECK9-NEXT:    store double* [[B]], double** [[B_ADDR]], align 8
6796 // CHECK9-NEXT:    store double* [[C]], double** [[C_ADDR]], align 8
6797 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
6798 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
6799 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
6800 // CHECK9-NEXT:    ret void
6801 //
6802 //
6803 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..22
6804 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
6805 // CHECK9-NEXT:  entry:
6806 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6807 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6808 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
6809 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
6810 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
6811 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
6812 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
6813 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6814 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6815 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6816 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6817 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6818 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6819 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6820 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6821 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6822 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6823 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
6824 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
6825 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6826 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6827 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
6828 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
6829 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
6830 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
6831 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
6832 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
6833 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6834 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 8
6835 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 8
6836 // CHECK9-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 8
6837 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
6838 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
6839 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
6840 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6841 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6842 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
6843 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6844 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6845 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
6846 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
6847 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6848 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
6849 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6850 // CHECK9:       omp.precond.then:
6851 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
6852 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6853 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
6854 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6855 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6856 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6857 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
6858 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
6859 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6860 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6861 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
6862 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6863 // CHECK9:       cond.true:
6864 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6865 // CHECK9-NEXT:    br label [[COND_END:%.*]]
6866 // CHECK9:       cond.false:
6867 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
6868 // CHECK9-NEXT:    br label [[COND_END]]
6869 // CHECK9:       cond.end:
6870 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
6871 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
6872 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
6873 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
6874 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6875 // CHECK9:       omp.inner.for.cond:
6876 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
6877 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56
6878 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
6879 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6880 // CHECK9:       omp.inner.for.body:
6881 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !56
6882 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
6883 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56
6884 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
6885 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !56
6886 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
6887 // CHECK9-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !56
6888 // CHECK9-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !56
6889 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, double**, double**, double**, i64)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !56
6890 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
6891 // CHECK9:       omp.inner.for.inc:
6892 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
6893 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !56
6894 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
6895 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56
6896 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]]
6897 // CHECK9:       omp.inner.for.end:
6898 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
6899 // CHECK9:       omp.loop.exit:
6900 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6901 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
6902 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
6903 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
6904 // CHECK9-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
6905 // CHECK9-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6906 // CHECK9:       .omp.final.then:
6907 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6908 // CHECK9-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
6909 // CHECK9-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
6910 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
6911 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
6912 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
6913 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
6914 // CHECK9:       .omp.final.done:
6915 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
6916 // CHECK9:       omp.precond.end:
6917 // CHECK9-NEXT:    ret void
6918 //
6919 //
6920 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..23
6921 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], double** noundef nonnull align 8 dereferenceable(8) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
6922 // CHECK9-NEXT:  entry:
6923 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
6924 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
6925 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6926 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6927 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
6928 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 8
6929 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 8
6930 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 8
6931 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
6932 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
6933 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
6934 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6935 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6936 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
6937 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
6938 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
6939 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6940 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6941 // CHECK9-NEXT:    [[I6:%.*]] = alloca i32, align 4
6942 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
6943 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
6944 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6945 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6946 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
6947 // CHECK9-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 8
6948 // CHECK9-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 8
6949 // CHECK9-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 8
6950 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
6951 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
6952 // CHECK9-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 8
6953 // CHECK9-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 8
6954 // CHECK9-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 8
6955 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
6956 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
6957 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
6958 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6959 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
6960 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6961 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6962 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
6963 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
6964 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
6965 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
6966 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6967 // CHECK9:       omp.precond.then:
6968 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
6969 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
6970 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
6971 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
6972 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
6973 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
6974 // CHECK9-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
6975 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
6976 // CHECK9-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
6977 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
6978 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
6979 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4
6980 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6981 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
6982 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6983 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
6984 // CHECK9-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
6985 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
6986 // CHECK9:       omp.dispatch.cond:
6987 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
6988 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
6989 // CHECK9-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
6990 // CHECK9-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
6991 // CHECK9-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6992 // CHECK9:       omp.dispatch.body:
6993 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
6994 // CHECK9-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
6995 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
6996 // CHECK9:       omp.inner.for.cond:
6997 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
6998 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !59
6999 // CHECK9-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
7000 // CHECK9-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7001 // CHECK9:       omp.inner.for.body:
7002 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
7003 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
7004 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7005 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !59
7006 // CHECK9-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 8, !llvm.access.group !59
7007 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
7008 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
7009 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i64 [[IDXPROM]]
7010 // CHECK9-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !59
7011 // CHECK9-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 8, !llvm.access.group !59
7012 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
7013 // CHECK9-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
7014 // CHECK9-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP25]], i64 [[IDXPROM8]]
7015 // CHECK9-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX9]], align 8, !llvm.access.group !59
7016 // CHECK9-NEXT:    [[ADD10:%.*]] = fadd double [[TMP24]], [[TMP27]]
7017 // CHECK9-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 8, !llvm.access.group !59
7018 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !59
7019 // CHECK9-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
7020 // CHECK9-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds double, double* [[TMP28]], i64 [[IDXPROM11]]
7021 // CHECK9-NEXT:    store double [[ADD10]], double* [[ARRAYIDX12]], align 8, !llvm.access.group !59
7022 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7023 // CHECK9:       omp.body.continue:
7024 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7025 // CHECK9:       omp.inner.for.inc:
7026 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
7027 // CHECK9-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP30]], 1
7028 // CHECK9-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59
7029 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]]
7030 // CHECK9:       omp.inner.for.end:
7031 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
7032 // CHECK9:       omp.dispatch.inc:
7033 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
7034 // CHECK9:       omp.dispatch.end:
7035 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7036 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
7037 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7038 // CHECK9:       .omp.final.then:
7039 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7040 // CHECK9-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP33]], 0
7041 // CHECK9-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
7042 // CHECK9-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
7043 // CHECK9-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
7044 // CHECK9-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
7045 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7046 // CHECK9:       .omp.final.done:
7047 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
7048 // CHECK9:       omp.precond.end:
7049 // CHECK9-NEXT:    ret void
7050 //
7051 //
7052 // CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
7053 // CHECK9-SAME: () #[[ATTR3:[0-9]+]] comdat {
7054 // CHECK9-NEXT:  entry:
7055 // CHECK9-NEXT:    [[A:%.*]] = alloca i32*, align 8
7056 // CHECK9-NEXT:    [[B:%.*]] = alloca i32*, align 8
7057 // CHECK9-NEXT:    [[C:%.*]] = alloca i32*, align 8
7058 // CHECK9-NEXT:    [[N:%.*]] = alloca i32, align 4
7059 // CHECK9-NEXT:    [[CH:%.*]] = alloca i32, align 4
7060 // CHECK9-NEXT:    [[N_CASTED:%.*]] = alloca i64, align 8
7061 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
7062 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
7063 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
7064 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7065 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7066 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7067 // CHECK9-NEXT:    [[N_CASTED3:%.*]] = alloca i64, align 8
7068 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [4 x i8*], align 8
7069 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS6:%.*]] = alloca [4 x i8*], align 8
7070 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [4 x i8*], align 8
7071 // CHECK9-NEXT:    [[_TMP8:%.*]] = alloca i32, align 4
7072 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
7073 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
7074 // CHECK9-NEXT:    [[CH_CASTED:%.*]] = alloca i64, align 8
7075 // CHECK9-NEXT:    [[N_CASTED18:%.*]] = alloca i64, align 8
7076 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [5 x i8*], align 8
7077 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [5 x i8*], align 8
7078 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [5 x i8*], align 8
7079 // CHECK9-NEXT:    [[_TMP23:%.*]] = alloca i32, align 4
7080 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
7081 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
7082 // CHECK9-NEXT:    [[N_CASTED32:%.*]] = alloca i64, align 8
7083 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [4 x i8*], align 8
7084 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS35:%.*]] = alloca [4 x i8*], align 8
7085 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [4 x i8*], align 8
7086 // CHECK9-NEXT:    [[_TMP37:%.*]] = alloca i32, align 4
7087 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
7088 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
7089 // CHECK9-NEXT:    [[CH_CASTED46:%.*]] = alloca i64, align 8
7090 // CHECK9-NEXT:    [[N_CASTED48:%.*]] = alloca i64, align 8
7091 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [5 x i8*], align 8
7092 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS51:%.*]] = alloca [5 x i8*], align 8
7093 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [5 x i8*], align 8
7094 // CHECK9-NEXT:    [[_TMP53:%.*]] = alloca i32, align 4
7095 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
7096 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
7097 // CHECK9-NEXT:    [[N_CASTED62:%.*]] = alloca i64, align 8
7098 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS64:%.*]] = alloca [4 x i8*], align 8
7099 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS65:%.*]] = alloca [4 x i8*], align 8
7100 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS66:%.*]] = alloca [4 x i8*], align 8
7101 // CHECK9-NEXT:    [[_TMP67:%.*]] = alloca i32, align 4
7102 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_68:%.*]] = alloca i32, align 4
7103 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_69:%.*]] = alloca i32, align 4
7104 // CHECK9-NEXT:    [[CH_CASTED76:%.*]] = alloca i64, align 8
7105 // CHECK9-NEXT:    [[N_CASTED78:%.*]] = alloca i64, align 8
7106 // CHECK9-NEXT:    [[DOTOFFLOAD_BASEPTRS80:%.*]] = alloca [5 x i8*], align 8
7107 // CHECK9-NEXT:    [[DOTOFFLOAD_PTRS81:%.*]] = alloca [5 x i8*], align 8
7108 // CHECK9-NEXT:    [[DOTOFFLOAD_MAPPERS82:%.*]] = alloca [5 x i8*], align 8
7109 // CHECK9-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
7110 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
7111 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
7112 // CHECK9-NEXT:    store i32 10000, i32* [[N]], align 4
7113 // CHECK9-NEXT:    store i32 100, i32* [[CH]], align 4
7114 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
7115 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
7116 // CHECK9-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
7117 // CHECK9-NEXT:    [[TMP1:%.*]] = load i64, i64* [[N_CASTED]], align 8
7118 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A]], align 8
7119 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[B]], align 8
7120 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[C]], align 8
7121 // CHECK9-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7122 // CHECK9-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
7123 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
7124 // CHECK9-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7125 // CHECK9-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
7126 // CHECK9-NEXT:    store i64 [[TMP1]], i64* [[TMP8]], align 8
7127 // CHECK9-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
7128 // CHECK9-NEXT:    store i8* null, i8** [[TMP9]], align 8
7129 // CHECK9-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
7130 // CHECK9-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
7131 // CHECK9-NEXT:    store i32* [[TMP2]], i32** [[TMP11]], align 8
7132 // CHECK9-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
7133 // CHECK9-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32**
7134 // CHECK9-NEXT:    store i32* [[TMP2]], i32** [[TMP13]], align 8
7135 // CHECK9-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
7136 // CHECK9-NEXT:    store i8* null, i8** [[TMP14]], align 8
7137 // CHECK9-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
7138 // CHECK9-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32**
7139 // CHECK9-NEXT:    store i32* [[TMP3]], i32** [[TMP16]], align 8
7140 // CHECK9-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
7141 // CHECK9-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
7142 // CHECK9-NEXT:    store i32* [[TMP3]], i32** [[TMP18]], align 8
7143 // CHECK9-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
7144 // CHECK9-NEXT:    store i8* null, i8** [[TMP19]], align 8
7145 // CHECK9-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
7146 // CHECK9-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
7147 // CHECK9-NEXT:    store i32* [[TMP4]], i32** [[TMP21]], align 8
7148 // CHECK9-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
7149 // CHECK9-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
7150 // CHECK9-NEXT:    store i32* [[TMP4]], i32** [[TMP23]], align 8
7151 // CHECK9-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
7152 // CHECK9-NEXT:    store i8* null, i8** [[TMP24]], align 8
7153 // CHECK9-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7154 // CHECK9-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7155 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
7156 // CHECK9-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
7157 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7158 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
7159 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7160 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7161 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7162 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7163 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
7164 // CHECK9-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
7165 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP30]])
7166 // CHECK9-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
7167 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
7168 // CHECK9-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
7169 // CHECK9:       omp_offload.failed:
7170 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42(i64 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]], i32* [[TMP4]]) #[[ATTR2]]
7171 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT]]
7172 // CHECK9:       omp_offload.cont:
7173 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
7174 // CHECK9-NEXT:    [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
7175 // CHECK9-NEXT:    store i32 [[TMP33]], i32* [[CONV4]], align 4
7176 // CHECK9-NEXT:    [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
7177 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[A]], align 8
7178 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[B]], align 8
7179 // CHECK9-NEXT:    [[TMP37:%.*]] = load i32*, i32** [[C]], align 8
7180 // CHECK9-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
7181 // CHECK9-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
7182 // CHECK9-NEXT:    store i64 [[TMP34]], i64* [[TMP39]], align 8
7183 // CHECK9-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
7184 // CHECK9-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64*
7185 // CHECK9-NEXT:    store i64 [[TMP34]], i64* [[TMP41]], align 8
7186 // CHECK9-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
7187 // CHECK9-NEXT:    store i8* null, i8** [[TMP42]], align 8
7188 // CHECK9-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
7189 // CHECK9-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32**
7190 // CHECK9-NEXT:    store i32* [[TMP35]], i32** [[TMP44]], align 8
7191 // CHECK9-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
7192 // CHECK9-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
7193 // CHECK9-NEXT:    store i32* [[TMP35]], i32** [[TMP46]], align 8
7194 // CHECK9-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
7195 // CHECK9-NEXT:    store i8* null, i8** [[TMP47]], align 8
7196 // CHECK9-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
7197 // CHECK9-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
7198 // CHECK9-NEXT:    store i32* [[TMP36]], i32** [[TMP49]], align 8
7199 // CHECK9-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
7200 // CHECK9-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
7201 // CHECK9-NEXT:    store i32* [[TMP36]], i32** [[TMP51]], align 8
7202 // CHECK9-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
7203 // CHECK9-NEXT:    store i8* null, i8** [[TMP52]], align 8
7204 // CHECK9-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 3
7205 // CHECK9-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32**
7206 // CHECK9-NEXT:    store i32* [[TMP37]], i32** [[TMP54]], align 8
7207 // CHECK9-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 3
7208 // CHECK9-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32**
7209 // CHECK9-NEXT:    store i32* [[TMP37]], i32** [[TMP56]], align 8
7210 // CHECK9-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 3
7211 // CHECK9-NEXT:    store i8* null, i8** [[TMP57]], align 8
7212 // CHECK9-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
7213 // CHECK9-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
7214 // CHECK9-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
7215 // CHECK9-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_9]], align 4
7216 // CHECK9-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
7217 // CHECK9-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP61]], 0
7218 // CHECK9-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
7219 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
7220 // CHECK9-NEXT:    store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
7221 // CHECK9-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
7222 // CHECK9-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP62]], 1
7223 // CHECK9-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD14]] to i64
7224 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
7225 // CHECK9-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
7226 // CHECK9-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
7227 // CHECK9-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
7228 // CHECK9:       omp_offload.failed15:
7229 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50(i64 [[TMP34]], i32* [[TMP35]], i32* [[TMP36]], i32* [[TMP37]]) #[[ATTR2]]
7230 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT16]]
7231 // CHECK9:       omp_offload.cont16:
7232 // CHECK9-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
7233 // CHECK9-NEXT:    [[CONV17:%.*]] = bitcast i64* [[CH_CASTED]] to i32*
7234 // CHECK9-NEXT:    store i32 [[TMP66]], i32* [[CONV17]], align 4
7235 // CHECK9-NEXT:    [[TMP67:%.*]] = load i64, i64* [[CH_CASTED]], align 8
7236 // CHECK9-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
7237 // CHECK9-NEXT:    [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32*
7238 // CHECK9-NEXT:    store i32 [[TMP68]], i32* [[CONV19]], align 4
7239 // CHECK9-NEXT:    [[TMP69:%.*]] = load i64, i64* [[N_CASTED18]], align 8
7240 // CHECK9-NEXT:    [[TMP70:%.*]] = load i32*, i32** [[A]], align 8
7241 // CHECK9-NEXT:    [[TMP71:%.*]] = load i32*, i32** [[B]], align 8
7242 // CHECK9-NEXT:    [[TMP72:%.*]] = load i32*, i32** [[C]], align 8
7243 // CHECK9-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
7244 // CHECK9-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i64*
7245 // CHECK9-NEXT:    store i64 [[TMP67]], i64* [[TMP74]], align 8
7246 // CHECK9-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
7247 // CHECK9-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
7248 // CHECK9-NEXT:    store i64 [[TMP67]], i64* [[TMP76]], align 8
7249 // CHECK9-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
7250 // CHECK9-NEXT:    store i8* null, i8** [[TMP77]], align 8
7251 // CHECK9-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
7252 // CHECK9-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64*
7253 // CHECK9-NEXT:    store i64 [[TMP69]], i64* [[TMP79]], align 8
7254 // CHECK9-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
7255 // CHECK9-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64*
7256 // CHECK9-NEXT:    store i64 [[TMP69]], i64* [[TMP81]], align 8
7257 // CHECK9-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
7258 // CHECK9-NEXT:    store i8* null, i8** [[TMP82]], align 8
7259 // CHECK9-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
7260 // CHECK9-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
7261 // CHECK9-NEXT:    store i32* [[TMP70]], i32** [[TMP84]], align 8
7262 // CHECK9-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
7263 // CHECK9-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
7264 // CHECK9-NEXT:    store i32* [[TMP70]], i32** [[TMP86]], align 8
7265 // CHECK9-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
7266 // CHECK9-NEXT:    store i8* null, i8** [[TMP87]], align 8
7267 // CHECK9-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
7268 // CHECK9-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32**
7269 // CHECK9-NEXT:    store i32* [[TMP71]], i32** [[TMP89]], align 8
7270 // CHECK9-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
7271 // CHECK9-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32**
7272 // CHECK9-NEXT:    store i32* [[TMP71]], i32** [[TMP91]], align 8
7273 // CHECK9-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
7274 // CHECK9-NEXT:    store i8* null, i8** [[TMP92]], align 8
7275 // CHECK9-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
7276 // CHECK9-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32**
7277 // CHECK9-NEXT:    store i32* [[TMP72]], i32** [[TMP94]], align 8
7278 // CHECK9-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
7279 // CHECK9-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32**
7280 // CHECK9-NEXT:    store i32* [[TMP72]], i32** [[TMP96]], align 8
7281 // CHECK9-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
7282 // CHECK9-NEXT:    store i8* null, i8** [[TMP97]], align 8
7283 // CHECK9-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
7284 // CHECK9-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
7285 // CHECK9-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
7286 // CHECK9-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_24]], align 4
7287 // CHECK9-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
7288 // CHECK9-NEXT:    [[SUB26:%.*]] = sub nsw i32 [[TMP101]], 0
7289 // CHECK9-NEXT:    [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
7290 // CHECK9-NEXT:    [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
7291 // CHECK9-NEXT:    store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
7292 // CHECK9-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
7293 // CHECK9-NEXT:    [[ADD29:%.*]] = add nsw i32 [[TMP102]], 1
7294 // CHECK9-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD29]] to i64
7295 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
7296 // CHECK9-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
7297 // CHECK9-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
7298 // CHECK9-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
7299 // CHECK9:       omp_offload.failed30:
7300 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58(i64 [[TMP67]], i64 [[TMP69]], i32* [[TMP70]], i32* [[TMP71]], i32* [[TMP72]]) #[[ATTR2]]
7301 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
7302 // CHECK9:       omp_offload.cont31:
7303 // CHECK9-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
7304 // CHECK9-NEXT:    [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32*
7305 // CHECK9-NEXT:    store i32 [[TMP106]], i32* [[CONV33]], align 4
7306 // CHECK9-NEXT:    [[TMP107:%.*]] = load i64, i64* [[N_CASTED32]], align 8
7307 // CHECK9-NEXT:    [[TMP108:%.*]] = load i32*, i32** [[A]], align 8
7308 // CHECK9-NEXT:    [[TMP109:%.*]] = load i32*, i32** [[B]], align 8
7309 // CHECK9-NEXT:    [[TMP110:%.*]] = load i32*, i32** [[C]], align 8
7310 // CHECK9-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
7311 // CHECK9-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
7312 // CHECK9-NEXT:    store i64 [[TMP107]], i64* [[TMP112]], align 8
7313 // CHECK9-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
7314 // CHECK9-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
7315 // CHECK9-NEXT:    store i64 [[TMP107]], i64* [[TMP114]], align 8
7316 // CHECK9-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
7317 // CHECK9-NEXT:    store i8* null, i8** [[TMP115]], align 8
7318 // CHECK9-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
7319 // CHECK9-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32**
7320 // CHECK9-NEXT:    store i32* [[TMP108]], i32** [[TMP117]], align 8
7321 // CHECK9-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
7322 // CHECK9-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32**
7323 // CHECK9-NEXT:    store i32* [[TMP108]], i32** [[TMP119]], align 8
7324 // CHECK9-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1
7325 // CHECK9-NEXT:    store i8* null, i8** [[TMP120]], align 8
7326 // CHECK9-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
7327 // CHECK9-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32**
7328 // CHECK9-NEXT:    store i32* [[TMP109]], i32** [[TMP122]], align 8
7329 // CHECK9-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
7330 // CHECK9-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32**
7331 // CHECK9-NEXT:    store i32* [[TMP109]], i32** [[TMP124]], align 8
7332 // CHECK9-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2
7333 // CHECK9-NEXT:    store i8* null, i8** [[TMP125]], align 8
7334 // CHECK9-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 3
7335 // CHECK9-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i32**
7336 // CHECK9-NEXT:    store i32* [[TMP110]], i32** [[TMP127]], align 8
7337 // CHECK9-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 3
7338 // CHECK9-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32**
7339 // CHECK9-NEXT:    store i32* [[TMP110]], i32** [[TMP129]], align 8
7340 // CHECK9-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 3
7341 // CHECK9-NEXT:    store i8* null, i8** [[TMP130]], align 8
7342 // CHECK9-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
7343 // CHECK9-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
7344 // CHECK9-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
7345 // CHECK9-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_38]], align 4
7346 // CHECK9-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
7347 // CHECK9-NEXT:    [[SUB40:%.*]] = sub nsw i32 [[TMP134]], 0
7348 // CHECK9-NEXT:    [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
7349 // CHECK9-NEXT:    [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
7350 // CHECK9-NEXT:    store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
7351 // CHECK9-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
7352 // CHECK9-NEXT:    [[ADD43:%.*]] = add nsw i32 [[TMP135]], 1
7353 // CHECK9-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD43]] to i64
7354 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
7355 // CHECK9-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.40, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.41, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
7356 // CHECK9-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
7357 // CHECK9-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
7358 // CHECK9:       omp_offload.failed44:
7359 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66(i64 [[TMP107]], i32* [[TMP108]], i32* [[TMP109]], i32* [[TMP110]]) #[[ATTR2]]
7360 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT45]]
7361 // CHECK9:       omp_offload.cont45:
7362 // CHECK9-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
7363 // CHECK9-NEXT:    [[CONV47:%.*]] = bitcast i64* [[CH_CASTED46]] to i32*
7364 // CHECK9-NEXT:    store i32 [[TMP139]], i32* [[CONV47]], align 4
7365 // CHECK9-NEXT:    [[TMP140:%.*]] = load i64, i64* [[CH_CASTED46]], align 8
7366 // CHECK9-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
7367 // CHECK9-NEXT:    [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32*
7368 // CHECK9-NEXT:    store i32 [[TMP141]], i32* [[CONV49]], align 4
7369 // CHECK9-NEXT:    [[TMP142:%.*]] = load i64, i64* [[N_CASTED48]], align 8
7370 // CHECK9-NEXT:    [[TMP143:%.*]] = load i32*, i32** [[A]], align 8
7371 // CHECK9-NEXT:    [[TMP144:%.*]] = load i32*, i32** [[B]], align 8
7372 // CHECK9-NEXT:    [[TMP145:%.*]] = load i32*, i32** [[C]], align 8
7373 // CHECK9-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
7374 // CHECK9-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
7375 // CHECK9-NEXT:    store i64 [[TMP140]], i64* [[TMP147]], align 8
7376 // CHECK9-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
7377 // CHECK9-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64*
7378 // CHECK9-NEXT:    store i64 [[TMP140]], i64* [[TMP149]], align 8
7379 // CHECK9-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
7380 // CHECK9-NEXT:    store i8* null, i8** [[TMP150]], align 8
7381 // CHECK9-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
7382 // CHECK9-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i64*
7383 // CHECK9-NEXT:    store i64 [[TMP142]], i64* [[TMP152]], align 8
7384 // CHECK9-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
7385 // CHECK9-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i64*
7386 // CHECK9-NEXT:    store i64 [[TMP142]], i64* [[TMP154]], align 8
7387 // CHECK9-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
7388 // CHECK9-NEXT:    store i8* null, i8** [[TMP155]], align 8
7389 // CHECK9-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
7390 // CHECK9-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
7391 // CHECK9-NEXT:    store i32* [[TMP143]], i32** [[TMP157]], align 8
7392 // CHECK9-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
7393 // CHECK9-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32**
7394 // CHECK9-NEXT:    store i32* [[TMP143]], i32** [[TMP159]], align 8
7395 // CHECK9-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2
7396 // CHECK9-NEXT:    store i8* null, i8** [[TMP160]], align 8
7397 // CHECK9-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
7398 // CHECK9-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to i32**
7399 // CHECK9-NEXT:    store i32* [[TMP144]], i32** [[TMP162]], align 8
7400 // CHECK9-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
7401 // CHECK9-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to i32**
7402 // CHECK9-NEXT:    store i32* [[TMP144]], i32** [[TMP164]], align 8
7403 // CHECK9-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3
7404 // CHECK9-NEXT:    store i8* null, i8** [[TMP165]], align 8
7405 // CHECK9-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 4
7406 // CHECK9-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to i32**
7407 // CHECK9-NEXT:    store i32* [[TMP145]], i32** [[TMP167]], align 8
7408 // CHECK9-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 4
7409 // CHECK9-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to i32**
7410 // CHECK9-NEXT:    store i32* [[TMP145]], i32** [[TMP169]], align 8
7411 // CHECK9-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 4
7412 // CHECK9-NEXT:    store i8* null, i8** [[TMP170]], align 8
7413 // CHECK9-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
7414 // CHECK9-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
7415 // CHECK9-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
7416 // CHECK9-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_54]], align 4
7417 // CHECK9-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
7418 // CHECK9-NEXT:    [[SUB56:%.*]] = sub nsw i32 [[TMP174]], 0
7419 // CHECK9-NEXT:    [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
7420 // CHECK9-NEXT:    [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
7421 // CHECK9-NEXT:    store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
7422 // CHECK9-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
7423 // CHECK9-NEXT:    [[ADD59:%.*]] = add nsw i32 [[TMP175]], 1
7424 // CHECK9-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD59]] to i64
7425 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
7426 // CHECK9-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.44, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.45, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
7427 // CHECK9-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
7428 // CHECK9-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
7429 // CHECK9:       omp_offload.failed60:
7430 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74(i64 [[TMP140]], i64 [[TMP142]], i32* [[TMP143]], i32* [[TMP144]], i32* [[TMP145]]) #[[ATTR2]]
7431 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT61]]
7432 // CHECK9:       omp_offload.cont61:
7433 // CHECK9-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
7434 // CHECK9-NEXT:    [[CONV63:%.*]] = bitcast i64* [[N_CASTED62]] to i32*
7435 // CHECK9-NEXT:    store i32 [[TMP179]], i32* [[CONV63]], align 4
7436 // CHECK9-NEXT:    [[TMP180:%.*]] = load i64, i64* [[N_CASTED62]], align 8
7437 // CHECK9-NEXT:    [[TMP181:%.*]] = load i32*, i32** [[A]], align 8
7438 // CHECK9-NEXT:    [[TMP182:%.*]] = load i32*, i32** [[B]], align 8
7439 // CHECK9-NEXT:    [[TMP183:%.*]] = load i32*, i32** [[C]], align 8
7440 // CHECK9-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
7441 // CHECK9-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i64*
7442 // CHECK9-NEXT:    store i64 [[TMP180]], i64* [[TMP185]], align 8
7443 // CHECK9-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
7444 // CHECK9-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i64*
7445 // CHECK9-NEXT:    store i64 [[TMP180]], i64* [[TMP187]], align 8
7446 // CHECK9-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 0
7447 // CHECK9-NEXT:    store i8* null, i8** [[TMP188]], align 8
7448 // CHECK9-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 1
7449 // CHECK9-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to i32**
7450 // CHECK9-NEXT:    store i32* [[TMP181]], i32** [[TMP190]], align 8
7451 // CHECK9-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 1
7452 // CHECK9-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to i32**
7453 // CHECK9-NEXT:    store i32* [[TMP181]], i32** [[TMP192]], align 8
7454 // CHECK9-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 1
7455 // CHECK9-NEXT:    store i8* null, i8** [[TMP193]], align 8
7456 // CHECK9-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 2
7457 // CHECK9-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to i32**
7458 // CHECK9-NEXT:    store i32* [[TMP182]], i32** [[TMP195]], align 8
7459 // CHECK9-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 2
7460 // CHECK9-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to i32**
7461 // CHECK9-NEXT:    store i32* [[TMP182]], i32** [[TMP197]], align 8
7462 // CHECK9-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 2
7463 // CHECK9-NEXT:    store i8* null, i8** [[TMP198]], align 8
7464 // CHECK9-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 3
7465 // CHECK9-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to i32**
7466 // CHECK9-NEXT:    store i32* [[TMP183]], i32** [[TMP200]], align 8
7467 // CHECK9-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 3
7468 // CHECK9-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to i32**
7469 // CHECK9-NEXT:    store i32* [[TMP183]], i32** [[TMP202]], align 8
7470 // CHECK9-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS66]], i64 0, i64 3
7471 // CHECK9-NEXT:    store i8* null, i8** [[TMP203]], align 8
7472 // CHECK9-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS64]], i32 0, i32 0
7473 // CHECK9-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS65]], i32 0, i32 0
7474 // CHECK9-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
7475 // CHECK9-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_68]], align 4
7476 // CHECK9-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_68]], align 4
7477 // CHECK9-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP207]], 0
7478 // CHECK9-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
7479 // CHECK9-NEXT:    [[SUB72:%.*]] = sub nsw i32 [[DIV71]], 1
7480 // CHECK9-NEXT:    store i32 [[SUB72]], i32* [[DOTCAPTURE_EXPR_69]], align 4
7481 // CHECK9-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_69]], align 4
7482 // CHECK9-NEXT:    [[ADD73:%.*]] = add nsw i32 [[TMP208]], 1
7483 // CHECK9-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD73]] to i64
7484 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
7485 // CHECK9-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.48, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.49, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
7486 // CHECK9-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
7487 // CHECK9-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED74:%.*]], label [[OMP_OFFLOAD_CONT75:%.*]]
7488 // CHECK9:       omp_offload.failed74:
7489 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82(i64 [[TMP180]], i32* [[TMP181]], i32* [[TMP182]], i32* [[TMP183]]) #[[ATTR2]]
7490 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT75]]
7491 // CHECK9:       omp_offload.cont75:
7492 // CHECK9-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
7493 // CHECK9-NEXT:    [[CONV77:%.*]] = bitcast i64* [[CH_CASTED76]] to i32*
7494 // CHECK9-NEXT:    store i32 [[TMP212]], i32* [[CONV77]], align 4
7495 // CHECK9-NEXT:    [[TMP213:%.*]] = load i64, i64* [[CH_CASTED76]], align 8
7496 // CHECK9-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
7497 // CHECK9-NEXT:    [[CONV79:%.*]] = bitcast i64* [[N_CASTED78]] to i32*
7498 // CHECK9-NEXT:    store i32 [[TMP214]], i32* [[CONV79]], align 4
7499 // CHECK9-NEXT:    [[TMP215:%.*]] = load i64, i64* [[N_CASTED78]], align 8
7500 // CHECK9-NEXT:    [[TMP216:%.*]] = load i32*, i32** [[A]], align 8
7501 // CHECK9-NEXT:    [[TMP217:%.*]] = load i32*, i32** [[B]], align 8
7502 // CHECK9-NEXT:    [[TMP218:%.*]] = load i32*, i32** [[C]], align 8
7503 // CHECK9-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
7504 // CHECK9-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i64*
7505 // CHECK9-NEXT:    store i64 [[TMP213]], i64* [[TMP220]], align 8
7506 // CHECK9-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
7507 // CHECK9-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i64*
7508 // CHECK9-NEXT:    store i64 [[TMP213]], i64* [[TMP222]], align 8
7509 // CHECK9-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 0
7510 // CHECK9-NEXT:    store i8* null, i8** [[TMP223]], align 8
7511 // CHECK9-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 1
7512 // CHECK9-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i64*
7513 // CHECK9-NEXT:    store i64 [[TMP215]], i64* [[TMP225]], align 8
7514 // CHECK9-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 1
7515 // CHECK9-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i64*
7516 // CHECK9-NEXT:    store i64 [[TMP215]], i64* [[TMP227]], align 8
7517 // CHECK9-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 1
7518 // CHECK9-NEXT:    store i8* null, i8** [[TMP228]], align 8
7519 // CHECK9-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 2
7520 // CHECK9-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to i32**
7521 // CHECK9-NEXT:    store i32* [[TMP216]], i32** [[TMP230]], align 8
7522 // CHECK9-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 2
7523 // CHECK9-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to i32**
7524 // CHECK9-NEXT:    store i32* [[TMP216]], i32** [[TMP232]], align 8
7525 // CHECK9-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 2
7526 // CHECK9-NEXT:    store i8* null, i8** [[TMP233]], align 8
7527 // CHECK9-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 3
7528 // CHECK9-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to i32**
7529 // CHECK9-NEXT:    store i32* [[TMP217]], i32** [[TMP235]], align 8
7530 // CHECK9-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 3
7531 // CHECK9-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to i32**
7532 // CHECK9-NEXT:    store i32* [[TMP217]], i32** [[TMP237]], align 8
7533 // CHECK9-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 3
7534 // CHECK9-NEXT:    store i8* null, i8** [[TMP238]], align 8
7535 // CHECK9-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 4
7536 // CHECK9-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to i32**
7537 // CHECK9-NEXT:    store i32* [[TMP218]], i32** [[TMP240]], align 8
7538 // CHECK9-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 4
7539 // CHECK9-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to i32**
7540 // CHECK9-NEXT:    store i32* [[TMP218]], i32** [[TMP242]], align 8
7541 // CHECK9-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS82]], i64 0, i64 4
7542 // CHECK9-NEXT:    store i8* null, i8** [[TMP243]], align 8
7543 // CHECK9-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS80]], i32 0, i32 0
7544 // CHECK9-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS81]], i32 0, i32 0
7545 // CHECK9-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
7546 // CHECK9-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_84]], align 4
7547 // CHECK9-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
7548 // CHECK9-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP247]], 0
7549 // CHECK9-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
7550 // CHECK9-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
7551 // CHECK9-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
7552 // CHECK9-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
7553 // CHECK9-NEXT:    [[ADD89:%.*]] = add nsw i32 [[TMP248]], 1
7554 // CHECK9-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD89]] to i64
7555 // CHECK9-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
7556 // CHECK9-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.52, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.53, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
7557 // CHECK9-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
7558 // CHECK9-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED90:%.*]], label [[OMP_OFFLOAD_CONT91:%.*]]
7559 // CHECK9:       omp_offload.failed90:
7560 // CHECK9-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90(i64 [[TMP213]], i64 [[TMP215]], i32* [[TMP216]], i32* [[TMP217]], i32* [[TMP218]]) #[[ATTR2]]
7561 // CHECK9-NEXT:    br label [[OMP_OFFLOAD_CONT91]]
7562 // CHECK9:       omp_offload.cont91:
7563 // CHECK9-NEXT:    ret i32 0
7564 //
7565 //
7566 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42
7567 // CHECK9-SAME: (i64 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
7568 // CHECK9-NEXT:  entry:
7569 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
7570 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
7571 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
7572 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
7573 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
7574 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
7575 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
7576 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
7577 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
7578 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
7579 // CHECK9-NEXT:    ret void
7580 //
7581 //
7582 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..26
7583 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
7584 // CHECK9-NEXT:  entry:
7585 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7586 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7587 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
7588 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
7589 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
7590 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
7591 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7592 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7593 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7594 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7595 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
7596 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7597 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7598 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7599 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7600 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
7601 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7602 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7603 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
7604 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
7605 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
7606 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
7607 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
7608 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
7609 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
7610 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
7611 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7612 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
7613 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7614 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7615 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7616 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7617 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7618 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
7619 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7620 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7621 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7622 // CHECK9:       omp.precond.then:
7623 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7624 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7625 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
7626 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7627 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7628 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7629 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
7630 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7631 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7632 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7633 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
7634 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7635 // CHECK9:       cond.true:
7636 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7637 // CHECK9-NEXT:    br label [[COND_END:%.*]]
7638 // CHECK9:       cond.false:
7639 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7640 // CHECK9-NEXT:    br label [[COND_END]]
7641 // CHECK9:       cond.end:
7642 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
7643 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7644 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7645 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
7646 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7647 // CHECK9:       omp.inner.for.cond:
7648 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
7649 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !62
7650 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
7651 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7652 // CHECK9:       omp.inner.for.body:
7653 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !62
7654 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
7655 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !62
7656 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
7657 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !62
7658 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7659 // CHECK9:       omp.inner.for.inc:
7660 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
7661 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !62
7662 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
7663 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62
7664 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]]
7665 // CHECK9:       omp.inner.for.end:
7666 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7667 // CHECK9:       omp.loop.exit:
7668 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7669 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
7670 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
7671 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7672 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
7673 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7674 // CHECK9:       .omp.final.then:
7675 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7676 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
7677 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
7678 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
7679 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
7680 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
7681 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7682 // CHECK9:       .omp.final.done:
7683 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
7684 // CHECK9:       omp.precond.end:
7685 // CHECK9-NEXT:    ret void
7686 //
7687 //
7688 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..27
7689 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
7690 // CHECK9-NEXT:  entry:
7691 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7692 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7693 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7694 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7695 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
7696 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
7697 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
7698 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
7699 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7700 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7701 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7702 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7703 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
7704 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7705 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7706 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7707 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7708 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
7709 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7710 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7711 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7712 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7713 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
7714 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
7715 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
7716 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
7717 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
7718 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
7719 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
7720 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
7721 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7722 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
7723 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7724 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7725 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7726 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7727 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7728 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
7729 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7730 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7731 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7732 // CHECK9:       omp.precond.then:
7733 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7734 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7735 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
7736 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7737 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
7738 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7739 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
7740 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
7741 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
7742 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7743 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7744 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7745 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
7746 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7747 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7748 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7749 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
7750 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7751 // CHECK9:       cond.true:
7752 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7753 // CHECK9-NEXT:    br label [[COND_END:%.*]]
7754 // CHECK9:       cond.false:
7755 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
7756 // CHECK9-NEXT:    br label [[COND_END]]
7757 // CHECK9:       cond.end:
7758 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
7759 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
7760 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
7761 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
7762 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7763 // CHECK9:       omp.inner.for.cond:
7764 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
7765 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !65
7766 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
7767 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7768 // CHECK9:       omp.inner.for.body:
7769 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
7770 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
7771 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7772 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !65
7773 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !65
7774 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
7775 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
7776 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
7777 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !65
7778 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !65
7779 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
7780 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
7781 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
7782 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !65
7783 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
7784 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !65
7785 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !65
7786 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
7787 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
7788 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !65
7789 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
7790 // CHECK9:       omp.body.continue:
7791 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7792 // CHECK9:       omp.inner.for.inc:
7793 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
7794 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
7795 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65
7796 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP66:![0-9]+]]
7797 // CHECK9:       omp.inner.for.end:
7798 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7799 // CHECK9:       omp.loop.exit:
7800 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7801 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
7802 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
7803 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7804 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
7805 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7806 // CHECK9:       .omp.final.then:
7807 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7808 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
7809 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
7810 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
7811 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
7812 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
7813 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7814 // CHECK9:       .omp.final.done:
7815 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
7816 // CHECK9:       omp.precond.end:
7817 // CHECK9-NEXT:    ret void
7818 //
7819 //
7820 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
7821 // CHECK9-SAME: (i64 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
7822 // CHECK9-NEXT:  entry:
7823 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
7824 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
7825 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
7826 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
7827 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
7828 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
7829 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
7830 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
7831 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
7832 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
7833 // CHECK9-NEXT:    ret void
7834 //
7835 //
7836 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..30
7837 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
7838 // CHECK9-NEXT:  entry:
7839 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7840 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7841 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
7842 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
7843 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
7844 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
7845 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7846 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7847 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7848 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7849 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
7850 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7851 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7852 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7853 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7854 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
7855 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7856 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7857 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
7858 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
7859 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
7860 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
7861 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
7862 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
7863 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
7864 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
7865 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7866 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
7867 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7868 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7869 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7870 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7871 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7872 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
7873 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7874 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7875 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7876 // CHECK9:       omp.precond.then:
7877 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
7878 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7879 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
7880 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7881 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7882 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7883 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
7884 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
7885 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7886 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7887 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
7888 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7889 // CHECK9:       cond.true:
7890 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7891 // CHECK9-NEXT:    br label [[COND_END:%.*]]
7892 // CHECK9:       cond.false:
7893 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
7894 // CHECK9-NEXT:    br label [[COND_END]]
7895 // CHECK9:       cond.end:
7896 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
7897 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
7898 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
7899 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
7900 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
7901 // CHECK9:       omp.inner.for.cond:
7902 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
7903 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !68
7904 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
7905 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7906 // CHECK9:       omp.inner.for.body:
7907 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !68
7908 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
7909 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !68
7910 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
7911 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !68
7912 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
7913 // CHECK9:       omp.inner.for.inc:
7914 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
7915 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !68
7916 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
7917 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !68
7918 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP69:![0-9]+]]
7919 // CHECK9:       omp.inner.for.end:
7920 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
7921 // CHECK9:       omp.loop.exit:
7922 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7923 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
7924 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
7925 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
7926 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
7927 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7928 // CHECK9:       .omp.final.then:
7929 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7930 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
7931 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
7932 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
7933 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
7934 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
7935 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
7936 // CHECK9:       .omp.final.done:
7937 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
7938 // CHECK9:       omp.precond.end:
7939 // CHECK9-NEXT:    ret void
7940 //
7941 //
7942 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..31
7943 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
7944 // CHECK9-NEXT:  entry:
7945 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
7946 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
7947 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7948 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7949 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
7950 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
7951 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
7952 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
7953 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
7954 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
7955 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7956 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7957 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
7958 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
7959 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
7960 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7961 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7962 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
7963 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
7964 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
7965 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7966 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7967 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
7968 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
7969 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
7970 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
7971 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
7972 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
7973 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
7974 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
7975 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
7976 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
7977 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7978 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
7979 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7980 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7981 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
7982 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
7983 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
7984 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
7985 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
7986 // CHECK9:       omp.precond.then:
7987 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
7988 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
7989 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
7990 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
7991 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
7992 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
7993 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
7994 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
7995 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
7996 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
7997 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
7998 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
7999 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
8000 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8001 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8002 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8003 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
8004 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8005 // CHECK9:       cond.true:
8006 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8007 // CHECK9-NEXT:    br label [[COND_END:%.*]]
8008 // CHECK9:       cond.false:
8009 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8010 // CHECK9-NEXT:    br label [[COND_END]]
8011 // CHECK9:       cond.end:
8012 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
8013 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
8014 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8015 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
8016 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8017 // CHECK9:       omp.inner.for.cond:
8018 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
8019 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !71
8020 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
8021 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8022 // CHECK9:       omp.inner.for.body:
8023 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
8024 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
8025 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8026 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !71
8027 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !71
8028 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
8029 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
8030 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
8031 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !71
8032 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !71
8033 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
8034 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
8035 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
8036 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !71
8037 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
8038 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !71
8039 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !71
8040 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
8041 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
8042 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !71
8043 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8044 // CHECK9:       omp.body.continue:
8045 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8046 // CHECK9:       omp.inner.for.inc:
8047 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
8048 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
8049 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71
8050 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP72:![0-9]+]]
8051 // CHECK9:       omp.inner.for.end:
8052 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8053 // CHECK9:       omp.loop.exit:
8054 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8055 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
8056 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
8057 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8058 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
8059 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8060 // CHECK9:       .omp.final.then:
8061 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8062 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
8063 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
8064 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
8065 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
8066 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
8067 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8068 // CHECK9:       .omp.final.done:
8069 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
8070 // CHECK9:       omp.precond.end:
8071 // CHECK9-NEXT:    ret void
8072 //
8073 //
8074 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58
8075 // CHECK9-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
8076 // CHECK9-NEXT:  entry:
8077 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
8078 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
8079 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
8080 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
8081 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
8082 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
8083 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
8084 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
8085 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
8086 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
8087 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
8088 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8089 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..34 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
8090 // CHECK9-NEXT:    ret void
8091 //
8092 //
8093 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..34
8094 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
8095 // CHECK9-NEXT:  entry:
8096 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8097 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8098 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
8099 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
8100 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
8101 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
8102 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
8103 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8104 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8105 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8106 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8107 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
8108 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8109 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8110 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8111 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8112 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
8113 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8114 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8115 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
8116 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
8117 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
8118 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
8119 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
8120 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
8121 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
8122 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
8123 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
8124 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
8125 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
8126 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
8127 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8128 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
8129 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8130 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8131 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8132 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
8133 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8134 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
8135 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8136 // CHECK9:       omp.precond.then:
8137 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8138 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8139 // CHECK9-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
8140 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8141 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8142 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
8143 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8144 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
8145 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
8146 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8147 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8148 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
8149 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8150 // CHECK9:       cond.true:
8151 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8152 // CHECK9-NEXT:    br label [[COND_END:%.*]]
8153 // CHECK9:       cond.false:
8154 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8155 // CHECK9-NEXT:    br label [[COND_END]]
8156 // CHECK9:       cond.end:
8157 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
8158 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8159 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8160 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
8161 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8162 // CHECK9:       omp.inner.for.cond:
8163 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
8164 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
8165 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
8166 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
8167 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8168 // CHECK9:       omp.inner.for.body:
8169 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
8170 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
8171 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
8172 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
8173 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]]), !llvm.access.group !74
8174 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8175 // CHECK9:       omp.inner.for.inc:
8176 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
8177 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
8178 // CHECK9-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
8179 // CHECK9-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
8180 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
8181 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
8182 // CHECK9-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
8183 // CHECK9-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
8184 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
8185 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !74
8186 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
8187 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
8188 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
8189 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
8190 // CHECK9-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
8191 // CHECK9-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
8192 // CHECK9:       cond.true10:
8193 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !74
8194 // CHECK9-NEXT:    br label [[COND_END12:%.*]]
8195 // CHECK9:       cond.false11:
8196 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
8197 // CHECK9-NEXT:    br label [[COND_END12]]
8198 // CHECK9:       cond.end12:
8199 // CHECK9-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE10]] ], [ [[TMP32]], [[COND_FALSE11]] ]
8200 // CHECK9-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !74
8201 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !74
8202 // CHECK9-NEXT:    store i32 [[TMP33]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !74
8203 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP75:![0-9]+]]
8204 // CHECK9:       omp.inner.for.end:
8205 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8206 // CHECK9:       omp.loop.exit:
8207 // CHECK9-NEXT:    [[TMP34:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8208 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP34]], align 4
8209 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP35]])
8210 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8211 // CHECK9-NEXT:    [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
8212 // CHECK9-NEXT:    br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8213 // CHECK9:       .omp.final.then:
8214 // CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8215 // CHECK9-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP38]], 0
8216 // CHECK9-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
8217 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
8218 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
8219 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
8220 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8221 // CHECK9:       .omp.final.done:
8222 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
8223 // CHECK9:       omp.precond.end:
8224 // CHECK9-NEXT:    ret void
8225 //
8226 //
8227 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..35
8228 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
8229 // CHECK9-NEXT:  entry:
8230 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8231 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8232 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8233 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8234 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
8235 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
8236 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
8237 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
8238 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8239 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8240 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8241 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8242 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
8243 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8244 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8245 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8246 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8247 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
8248 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8249 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8250 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8251 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8252 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
8253 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
8254 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
8255 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
8256 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
8257 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
8258 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
8259 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
8260 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8261 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
8262 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8263 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8264 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8265 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8266 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8267 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
8268 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8269 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8270 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8271 // CHECK9:       omp.precond.then:
8272 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8273 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8274 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
8275 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8276 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
8277 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8278 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
8279 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
8280 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
8281 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8282 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8283 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8284 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
8285 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8286 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8287 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8288 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
8289 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8290 // CHECK9:       cond.true:
8291 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8292 // CHECK9-NEXT:    br label [[COND_END:%.*]]
8293 // CHECK9:       cond.false:
8294 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8295 // CHECK9-NEXT:    br label [[COND_END]]
8296 // CHECK9:       cond.end:
8297 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
8298 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
8299 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8300 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
8301 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8302 // CHECK9:       omp.inner.for.cond:
8303 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
8304 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !77
8305 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
8306 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8307 // CHECK9:       omp.inner.for.body:
8308 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
8309 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
8310 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8311 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !77
8312 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !77
8313 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
8314 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
8315 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
8316 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !77
8317 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !77
8318 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
8319 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
8320 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
8321 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !77
8322 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
8323 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !77
8324 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !77
8325 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
8326 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
8327 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !77
8328 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8329 // CHECK9:       omp.body.continue:
8330 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8331 // CHECK9:       omp.inner.for.inc:
8332 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
8333 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
8334 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !77
8335 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP78:![0-9]+]]
8336 // CHECK9:       omp.inner.for.end:
8337 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8338 // CHECK9:       omp.loop.exit:
8339 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8340 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
8341 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
8342 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8343 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
8344 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8345 // CHECK9:       .omp.final.then:
8346 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8347 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
8348 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
8349 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
8350 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
8351 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
8352 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8353 // CHECK9:       .omp.final.done:
8354 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
8355 // CHECK9:       omp.precond.end:
8356 // CHECK9-NEXT:    ret void
8357 //
8358 //
8359 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66
8360 // CHECK9-SAME: (i64 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
8361 // CHECK9-NEXT:  entry:
8362 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
8363 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
8364 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
8365 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
8366 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
8367 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
8368 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
8369 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
8370 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8371 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..38 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
8372 // CHECK9-NEXT:    ret void
8373 //
8374 //
8375 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..38
8376 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
8377 // CHECK9-NEXT:  entry:
8378 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8379 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8380 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
8381 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
8382 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
8383 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
8384 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8385 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8386 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8387 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8388 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
8389 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8390 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8391 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8392 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8393 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
8394 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8395 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8396 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
8397 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
8398 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
8399 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
8400 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
8401 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
8402 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
8403 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
8404 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8405 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
8406 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8407 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8408 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8409 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8410 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8411 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
8412 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8413 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8414 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8415 // CHECK9:       omp.precond.then:
8416 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8417 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8418 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
8419 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8420 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8421 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8422 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
8423 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8424 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8425 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8426 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
8427 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8428 // CHECK9:       cond.true:
8429 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8430 // CHECK9-NEXT:    br label [[COND_END:%.*]]
8431 // CHECK9:       cond.false:
8432 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8433 // CHECK9-NEXT:    br label [[COND_END]]
8434 // CHECK9:       cond.end:
8435 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
8436 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8437 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8438 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
8439 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8440 // CHECK9:       omp.inner.for.cond:
8441 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
8442 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !80
8443 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
8444 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8445 // CHECK9:       omp.inner.for.body:
8446 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !80
8447 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
8448 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !80
8449 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
8450 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..39 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !80
8451 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8452 // CHECK9:       omp.inner.for.inc:
8453 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
8454 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !80
8455 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
8456 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !80
8457 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP81:![0-9]+]]
8458 // CHECK9:       omp.inner.for.end:
8459 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8460 // CHECK9:       omp.loop.exit:
8461 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8462 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
8463 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
8464 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8465 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
8466 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8467 // CHECK9:       .omp.final.then:
8468 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8469 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
8470 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
8471 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
8472 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
8473 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
8474 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8475 // CHECK9:       .omp.final.done:
8476 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
8477 // CHECK9:       omp.precond.end:
8478 // CHECK9-NEXT:    ret void
8479 //
8480 //
8481 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..39
8482 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
8483 // CHECK9-NEXT:  entry:
8484 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8485 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8486 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8487 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8488 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
8489 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
8490 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
8491 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
8492 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8493 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8494 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8495 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8496 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
8497 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8498 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8499 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8500 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8501 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
8502 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8503 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8504 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8505 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8506 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
8507 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
8508 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
8509 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
8510 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
8511 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
8512 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
8513 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
8514 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8515 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
8516 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8517 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8518 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8519 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8520 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8521 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
8522 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8523 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8524 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8525 // CHECK9:       omp.precond.then:
8526 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8527 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8528 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
8529 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8530 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
8531 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8532 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
8533 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
8534 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
8535 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8536 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8537 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8538 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
8539 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8540 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8541 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8542 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
8543 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8544 // CHECK9:       cond.true:
8545 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8546 // CHECK9-NEXT:    br label [[COND_END:%.*]]
8547 // CHECK9:       cond.false:
8548 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8549 // CHECK9-NEXT:    br label [[COND_END]]
8550 // CHECK9:       cond.end:
8551 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
8552 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
8553 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8554 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
8555 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8556 // CHECK9:       omp.inner.for.cond:
8557 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
8558 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !83
8559 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
8560 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8561 // CHECK9:       omp.inner.for.body:
8562 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
8563 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
8564 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8565 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !83
8566 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !83
8567 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
8568 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
8569 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i64 [[IDXPROM]]
8570 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !83
8571 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !83
8572 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
8573 // CHECK9-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[TMP24]] to i64
8574 // CHECK9-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM7]]
8575 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !83
8576 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
8577 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !83
8578 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !83
8579 // CHECK9-NEXT:    [[IDXPROM10:%.*]] = sext i32 [[TMP27]] to i64
8580 // CHECK9-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM10]]
8581 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX11]], align 4, !llvm.access.group !83
8582 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8583 // CHECK9:       omp.body.continue:
8584 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8585 // CHECK9:       omp.inner.for.inc:
8586 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
8587 // CHECK9-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP28]], 1
8588 // CHECK9-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !83
8589 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP84:![0-9]+]]
8590 // CHECK9:       omp.inner.for.end:
8591 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8592 // CHECK9:       omp.loop.exit:
8593 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8594 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
8595 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
8596 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8597 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
8598 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8599 // CHECK9:       .omp.final.then:
8600 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8601 // CHECK9-NEXT:    [[SUB13:%.*]] = sub nsw i32 [[TMP33]], 0
8602 // CHECK9-NEXT:    [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
8603 // CHECK9-NEXT:    [[MUL15:%.*]] = mul nsw i32 [[DIV14]], 1
8604 // CHECK9-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL15]]
8605 // CHECK9-NEXT:    store i32 [[ADD16]], i32* [[I4]], align 4
8606 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8607 // CHECK9:       .omp.final.done:
8608 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
8609 // CHECK9:       omp.precond.end:
8610 // CHECK9-NEXT:    ret void
8611 //
8612 //
8613 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74
8614 // CHECK9-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
8615 // CHECK9-NEXT:  entry:
8616 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
8617 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
8618 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
8619 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
8620 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
8621 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
8622 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
8623 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
8624 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
8625 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
8626 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
8627 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8628 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..42 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
8629 // CHECK9-NEXT:    ret void
8630 //
8631 //
8632 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..42
8633 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
8634 // CHECK9-NEXT:  entry:
8635 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8636 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8637 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
8638 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
8639 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
8640 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
8641 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
8642 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8643 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8644 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8645 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8646 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
8647 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
8648 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8649 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8650 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8651 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8652 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
8653 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
8654 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8655 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8656 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
8657 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
8658 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
8659 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
8660 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
8661 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
8662 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
8663 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
8664 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
8665 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
8666 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
8667 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
8668 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
8669 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8670 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8671 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
8672 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8673 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
8674 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
8675 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
8676 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8677 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
8678 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8679 // CHECK9:       omp.precond.then:
8680 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8681 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8682 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
8683 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8684 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8685 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8686 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
8687 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8688 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8689 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8690 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
8691 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8692 // CHECK9:       cond.true:
8693 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8694 // CHECK9-NEXT:    br label [[COND_END:%.*]]
8695 // CHECK9:       cond.false:
8696 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8697 // CHECK9-NEXT:    br label [[COND_END]]
8698 // CHECK9:       cond.end:
8699 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
8700 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8701 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8702 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
8703 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8704 // CHECK9:       omp.inner.for.cond:
8705 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
8706 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !86
8707 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
8708 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8709 // CHECK9:       omp.inner.for.body:
8710 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !86
8711 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
8712 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !86
8713 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
8714 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !86
8715 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
8716 // CHECK9-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !86
8717 // CHECK9-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !86
8718 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**, i64)* @.omp_outlined..43 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !86
8719 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8720 // CHECK9:       omp.inner.for.inc:
8721 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
8722 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !86
8723 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
8724 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !86
8725 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP87:![0-9]+]]
8726 // CHECK9:       omp.inner.for.end:
8727 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
8728 // CHECK9:       omp.loop.exit:
8729 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8730 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
8731 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
8732 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8733 // CHECK9-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
8734 // CHECK9-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8735 // CHECK9:       .omp.final.then:
8736 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8737 // CHECK9-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
8738 // CHECK9-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
8739 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
8740 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
8741 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
8742 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8743 // CHECK9:       .omp.final.done:
8744 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
8745 // CHECK9:       omp.precond.end:
8746 // CHECK9-NEXT:    ret void
8747 //
8748 //
8749 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..43
8750 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
8751 // CHECK9-NEXT:  entry:
8752 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8753 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8754 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8755 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8756 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
8757 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
8758 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
8759 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
8760 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
8761 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8762 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8763 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8764 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
8765 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
8766 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
8767 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
8768 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8769 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8770 // CHECK9-NEXT:    [[I6:%.*]] = alloca i32, align 4
8771 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8772 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8773 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8774 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8775 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
8776 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
8777 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
8778 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
8779 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
8780 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
8781 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
8782 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
8783 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
8784 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
8785 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8786 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8787 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8788 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8789 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8790 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
8791 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
8792 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
8793 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8794 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8795 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8796 // CHECK9:       omp.precond.then:
8797 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
8798 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
8799 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
8800 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
8801 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
8802 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8803 // CHECK9-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
8804 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
8805 // CHECK9-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
8806 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8807 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8808 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4
8809 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8810 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
8811 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
8812 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
8813 // CHECK9:       omp.dispatch.cond:
8814 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8815 // CHECK9-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8816 // CHECK9-NEXT:    [[CONV7:%.*]] = trunc i64 [[TMP14]] to i32
8817 // CHECK9-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[TMP13]], [[CONV7]]
8818 // CHECK9-NEXT:    br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8819 // CHECK9:       cond.true:
8820 // CHECK9-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
8821 // CHECK9-NEXT:    [[CONV9:%.*]] = trunc i64 [[TMP15]] to i32
8822 // CHECK9-NEXT:    br label [[COND_END:%.*]]
8823 // CHECK9:       cond.false:
8824 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8825 // CHECK9-NEXT:    br label [[COND_END]]
8826 // CHECK9:       cond.end:
8827 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[CONV9]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
8828 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
8829 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8830 // CHECK9-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
8831 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
8832 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8833 // CHECK9-NEXT:    [[CMP10:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
8834 // CHECK9-NEXT:    br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
8835 // CHECK9:       omp.dispatch.body:
8836 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8837 // CHECK9:       omp.inner.for.cond:
8838 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
8839 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !89
8840 // CHECK9-NEXT:    [[CMP11:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
8841 // CHECK9-NEXT:    br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8842 // CHECK9:       omp.inner.for.body:
8843 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
8844 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
8845 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8846 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !89
8847 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !89
8848 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
8849 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
8850 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i64 [[IDXPROM]]
8851 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !89
8852 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !89
8853 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
8854 // CHECK9-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[TMP27]] to i64
8855 // CHECK9-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM12]]
8856 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX13]], align 4, !llvm.access.group !89
8857 // CHECK9-NEXT:    [[ADD14:%.*]] = add nsw i32 [[TMP25]], [[TMP28]]
8858 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !89
8859 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !89
8860 // CHECK9-NEXT:    [[IDXPROM15:%.*]] = sext i32 [[TMP30]] to i64
8861 // CHECK9-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i64 [[IDXPROM15]]
8862 // CHECK9-NEXT:    store i32 [[ADD14]], i32* [[ARRAYIDX16]], align 4, !llvm.access.group !89
8863 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
8864 // CHECK9:       omp.body.continue:
8865 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8866 // CHECK9:       omp.inner.for.inc:
8867 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
8868 // CHECK9-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP31]], 1
8869 // CHECK9-NEXT:    store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !89
8870 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP90:![0-9]+]]
8871 // CHECK9:       omp.inner.for.end:
8872 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
8873 // CHECK9:       omp.dispatch.inc:
8874 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
8875 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8876 // CHECK9-NEXT:    [[ADD18:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
8877 // CHECK9-NEXT:    store i32 [[ADD18]], i32* [[DOTOMP_LB]], align 4
8878 // CHECK9-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
8879 // CHECK9-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
8880 // CHECK9-NEXT:    [[ADD19:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
8881 // CHECK9-NEXT:    store i32 [[ADD19]], i32* [[DOTOMP_UB]], align 4
8882 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
8883 // CHECK9:       omp.dispatch.end:
8884 // CHECK9-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8885 // CHECK9-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
8886 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
8887 // CHECK9-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
8888 // CHECK9-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
8889 // CHECK9-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8890 // CHECK9:       .omp.final.then:
8891 // CHECK9-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8892 // CHECK9-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[TMP40]], 0
8893 // CHECK9-NEXT:    [[DIV21:%.*]] = sdiv i32 [[SUB20]], 1
8894 // CHECK9-NEXT:    [[MUL22:%.*]] = mul nsw i32 [[DIV21]], 1
8895 // CHECK9-NEXT:    [[ADD23:%.*]] = add nsw i32 0, [[MUL22]]
8896 // CHECK9-NEXT:    store i32 [[ADD23]], i32* [[I6]], align 4
8897 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
8898 // CHECK9:       .omp.final.done:
8899 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
8900 // CHECK9:       omp.precond.end:
8901 // CHECK9-NEXT:    ret void
8902 //
8903 //
8904 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82
8905 // CHECK9-SAME: (i64 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
8906 // CHECK9-NEXT:  entry:
8907 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
8908 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
8909 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
8910 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
8911 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
8912 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
8913 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
8914 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
8915 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
8916 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..46 to void (i32*, i32*, ...)*), i32* [[CONV]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
8917 // CHECK9-NEXT:    ret void
8918 //
8919 //
8920 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..46
8921 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
8922 // CHECK9-NEXT:  entry:
8923 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
8924 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
8925 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
8926 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
8927 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
8928 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
8929 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
8930 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
8931 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8932 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8933 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
8934 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8935 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8936 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8937 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8938 // CHECK9-NEXT:    [[I3:%.*]] = alloca i32, align 4
8939 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
8940 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
8941 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
8942 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
8943 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
8944 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
8945 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
8946 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
8947 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
8948 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
8949 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
8950 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
8951 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8952 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
8953 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8954 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8955 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
8956 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
8957 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
8958 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
8959 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8960 // CHECK9:       omp.precond.then:
8961 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
8962 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8963 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
8964 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
8965 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
8966 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
8967 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
8968 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
8969 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8970 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8971 // CHECK9-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
8972 // CHECK9-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8973 // CHECK9:       cond.true:
8974 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
8975 // CHECK9-NEXT:    br label [[COND_END:%.*]]
8976 // CHECK9:       cond.false:
8977 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
8978 // CHECK9-NEXT:    br label [[COND_END]]
8979 // CHECK9:       cond.end:
8980 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
8981 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
8982 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
8983 // CHECK9-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
8984 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
8985 // CHECK9:       omp.inner.for.cond:
8986 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
8987 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !92
8988 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
8989 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8990 // CHECK9:       omp.inner.for.body:
8991 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !92
8992 // CHECK9-NEXT:    [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
8993 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !92
8994 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
8995 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**)* @.omp_outlined..47 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !92
8996 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
8997 // CHECK9:       omp.inner.for.inc:
8998 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
8999 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !92
9000 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
9001 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !92
9002 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP93:![0-9]+]]
9003 // CHECK9:       omp.inner.for.end:
9004 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9005 // CHECK9:       omp.loop.exit:
9006 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9007 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
9008 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
9009 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9010 // CHECK9-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
9011 // CHECK9-NEXT:    br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9012 // CHECK9:       .omp.final.then:
9013 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9014 // CHECK9-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP27]], 0
9015 // CHECK9-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
9016 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
9017 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
9018 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
9019 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9020 // CHECK9:       .omp.final.done:
9021 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
9022 // CHECK9:       omp.precond.end:
9023 // CHECK9-NEXT:    ret void
9024 //
9025 //
9026 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..47
9027 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
9028 // CHECK9-NEXT:  entry:
9029 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9030 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9031 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9032 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9033 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
9034 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
9035 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
9036 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
9037 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9038 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9039 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9040 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9041 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
9042 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9043 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9044 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9045 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9046 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
9047 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9048 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9049 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9050 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9051 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
9052 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
9053 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
9054 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
9055 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9056 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
9057 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
9058 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
9059 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
9060 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
9061 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9062 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
9063 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9064 // CHECK9-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9065 // CHECK9-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9066 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
9067 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9068 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
9069 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9070 // CHECK9:       omp.precond.then:
9071 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9072 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9073 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
9074 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9075 // CHECK9-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
9076 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9077 // CHECK9-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP9]] to i32
9078 // CHECK9-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
9079 // CHECK9-NEXT:    store i32 [[CONV3]], i32* [[DOTOMP_UB]], align 4
9080 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9081 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9082 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9083 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9084 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9085 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
9086 // CHECK9-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
9087 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
9088 // CHECK9:       omp.dispatch.cond:
9089 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9090 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
9091 // CHECK9-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
9092 // CHECK9-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
9093 // CHECK9-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
9094 // CHECK9:       omp.dispatch.body:
9095 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9096 // CHECK9-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
9097 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9098 // CHECK9:       omp.inner.for.cond:
9099 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
9100 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !95
9101 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
9102 // CHECK9-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9103 // CHECK9:       omp.inner.for.body:
9104 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
9105 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
9106 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9107 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !95
9108 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !95
9109 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
9110 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64
9111 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i64 [[IDXPROM]]
9112 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !95
9113 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !95
9114 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
9115 // CHECK9-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
9116 // CHECK9-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP24]], i64 [[IDXPROM6]]
9117 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4, !llvm.access.group !95
9118 // CHECK9-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP23]], [[TMP26]]
9119 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !95
9120 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !95
9121 // CHECK9-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[TMP28]] to i64
9122 // CHECK9-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i64 [[IDXPROM9]]
9123 // CHECK9-NEXT:    store i32 [[ADD8]], i32* [[ARRAYIDX10]], align 4, !llvm.access.group !95
9124 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9125 // CHECK9:       omp.body.continue:
9126 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9127 // CHECK9:       omp.inner.for.inc:
9128 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
9129 // CHECK9-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP29]], 1
9130 // CHECK9-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !95
9131 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP96:![0-9]+]]
9132 // CHECK9:       omp.inner.for.end:
9133 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
9134 // CHECK9:       omp.dispatch.inc:
9135 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
9136 // CHECK9:       omp.dispatch.end:
9137 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9138 // CHECK9-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
9139 // CHECK9-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9140 // CHECK9:       .omp.final.then:
9141 // CHECK9-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9142 // CHECK9-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[TMP32]], 0
9143 // CHECK9-NEXT:    [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
9144 // CHECK9-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
9145 // CHECK9-NEXT:    [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
9146 // CHECK9-NEXT:    store i32 [[ADD15]], i32* [[I4]], align 4
9147 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9148 // CHECK9:       .omp.final.done:
9149 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
9150 // CHECK9:       omp.precond.end:
9151 // CHECK9-NEXT:    ret void
9152 //
9153 //
9154 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90
9155 // CHECK9-SAME: (i64 noundef [[CH:%.*]], i64 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
9156 // CHECK9-NEXT:  entry:
9157 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i64, align 8
9158 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
9159 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
9160 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 8
9161 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 8
9162 // CHECK9-NEXT:    store i64 [[CH]], i64* [[CH_ADDR]], align 8
9163 // CHECK9-NEXT:    store i64 [[N]], i64* [[N_ADDR]], align 8
9164 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
9165 // CHECK9-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 8
9166 // CHECK9-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 8
9167 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[CH_ADDR]] to i32*
9168 // CHECK9-NEXT:    [[CONV1:%.*]] = bitcast i64* [[N_ADDR]] to i32*
9169 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..50 to void (i32*, i32*, ...)*), i32* [[CONV]], i32* [[CONV1]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
9170 // CHECK9-NEXT:    ret void
9171 //
9172 //
9173 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..50
9174 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1]] {
9175 // CHECK9-NEXT:  entry:
9176 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9177 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9178 // CHECK9-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 8
9179 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
9180 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
9181 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
9182 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
9183 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9184 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9185 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9186 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9187 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
9188 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
9189 // CHECK9-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9190 // CHECK9-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9191 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9192 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9193 // CHECK9-NEXT:    [[I4:%.*]] = alloca i32, align 4
9194 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
9195 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9196 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9197 // CHECK9-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 8
9198 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
9199 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
9200 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
9201 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
9202 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 8
9203 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9204 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
9205 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
9206 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
9207 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
9208 // CHECK9-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
9209 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
9210 // CHECK9-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9211 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9212 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
9213 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9214 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
9215 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
9216 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
9217 // CHECK9-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9218 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
9219 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9220 // CHECK9:       omp.precond.then:
9221 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9222 // CHECK9-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9223 // CHECK9-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
9224 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9225 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9226 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9227 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
9228 // CHECK9-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
9229 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9230 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9231 // CHECK9-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
9232 // CHECK9-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9233 // CHECK9:       cond.true:
9234 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9235 // CHECK9-NEXT:    br label [[COND_END:%.*]]
9236 // CHECK9:       cond.false:
9237 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
9238 // CHECK9-NEXT:    br label [[COND_END]]
9239 // CHECK9:       cond.end:
9240 // CHECK9-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
9241 // CHECK9-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
9242 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
9243 // CHECK9-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
9244 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9245 // CHECK9:       omp.inner.for.cond:
9246 // CHECK9-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
9247 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !98
9248 // CHECK9-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
9249 // CHECK9-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9250 // CHECK9:       omp.inner.for.body:
9251 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !98
9252 // CHECK9-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
9253 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !98
9254 // CHECK9-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
9255 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !98
9256 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
9257 // CHECK9-NEXT:    store i32 [[TMP23]], i32* [[CONV]], align 4, !llvm.access.group !98
9258 // CHECK9-NEXT:    [[TMP24:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !98
9259 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i32**, i32**, i32**, i64)* @.omp_outlined..51 to void (i32*, i32*, ...)*), i64 [[TMP20]], i64 [[TMP22]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i64 [[TMP24]]), !llvm.access.group !98
9260 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9261 // CHECK9:       omp.inner.for.inc:
9262 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
9263 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !98
9264 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
9265 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !98
9266 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP99:![0-9]+]]
9267 // CHECK9:       omp.inner.for.end:
9268 // CHECK9-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
9269 // CHECK9:       omp.loop.exit:
9270 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9271 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
9272 // CHECK9-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP28]])
9273 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9274 // CHECK9-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
9275 // CHECK9-NEXT:    br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9276 // CHECK9:       .omp.final.then:
9277 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9278 // CHECK9-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP31]], 0
9279 // CHECK9-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
9280 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
9281 // CHECK9-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
9282 // CHECK9-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
9283 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9284 // CHECK9:       .omp.final.done:
9285 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
9286 // CHECK9:       omp.precond.end:
9287 // CHECK9-NEXT:    ret void
9288 //
9289 //
9290 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..51
9291 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
9292 // CHECK9-NEXT:  entry:
9293 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
9294 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
9295 // CHECK9-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9296 // CHECK9-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9297 // CHECK9-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 8
9298 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
9299 // CHECK9-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
9300 // CHECK9-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 8
9301 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
9302 // CHECK9-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9303 // CHECK9-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9304 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9305 // CHECK9-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
9306 // CHECK9-NEXT:    [[I:%.*]] = alloca i32, align 4
9307 // CHECK9-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
9308 // CHECK9-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
9309 // CHECK9-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9310 // CHECK9-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9311 // CHECK9-NEXT:    [[I6:%.*]] = alloca i32, align 4
9312 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
9313 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
9314 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9315 // CHECK9-NEXT:    store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9316 // CHECK9-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 8
9317 // CHECK9-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
9318 // CHECK9-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
9319 // CHECK9-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 8
9320 // CHECK9-NEXT:    store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
9321 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
9322 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
9323 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
9324 // CHECK9-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 8
9325 // CHECK9-NEXT:    [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
9326 // CHECK9-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
9327 // CHECK9-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9328 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9329 // CHECK9-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
9330 // CHECK9-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9331 // CHECK9-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
9332 // CHECK9-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
9333 // CHECK9-NEXT:    store i32 0, i32* [[I]], align 4
9334 // CHECK9-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9335 // CHECK9-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
9336 // CHECK9-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9337 // CHECK9:       omp.precond.then:
9338 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
9339 // CHECK9-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
9340 // CHECK9-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
9341 // CHECK9-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
9342 // CHECK9-NEXT:    [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
9343 // CHECK9-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
9344 // CHECK9-NEXT:    [[CONV5:%.*]] = trunc i64 [[TMP9]] to i32
9345 // CHECK9-NEXT:    store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
9346 // CHECK9-NEXT:    store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
9347 // CHECK9-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9348 // CHECK9-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9349 // CHECK9-NEXT:    [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4
9350 // CHECK9-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9351 // CHECK9-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
9352 // CHECK9-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9353 // CHECK9-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
9354 // CHECK9-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
9355 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
9356 // CHECK9:       omp.dispatch.cond:
9357 // CHECK9-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
9358 // CHECK9-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
9359 // CHECK9-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
9360 // CHECK9-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
9361 // CHECK9-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
9362 // CHECK9:       omp.dispatch.body:
9363 // CHECK9-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
9364 // CHECK9-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
9365 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
9366 // CHECK9:       omp.inner.for.cond:
9367 // CHECK9-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
9368 // CHECK9-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !101
9369 // CHECK9-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
9370 // CHECK9-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9371 // CHECK9:       omp.inner.for.body:
9372 // CHECK9-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
9373 // CHECK9-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
9374 // CHECK9-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9375 // CHECK9-NEXT:    store i32 [[ADD]], i32* [[I6]], align 4, !llvm.access.group !101
9376 // CHECK9-NEXT:    [[TMP22:%.*]] = load i32*, i32** [[TMP2]], align 8, !llvm.access.group !101
9377 // CHECK9-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
9378 // CHECK9-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
9379 // CHECK9-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP22]], i64 [[IDXPROM]]
9380 // CHECK9-NEXT:    [[TMP24:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !101
9381 // CHECK9-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[TMP3]], align 8, !llvm.access.group !101
9382 // CHECK9-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
9383 // CHECK9-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP26]] to i64
9384 // CHECK9-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP25]], i64 [[IDXPROM8]]
9385 // CHECK9-NEXT:    [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4, !llvm.access.group !101
9386 // CHECK9-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP27]]
9387 // CHECK9-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[TMP1]], align 8, !llvm.access.group !101
9388 // CHECK9-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !101
9389 // CHECK9-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[TMP29]] to i64
9390 // CHECK9-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[TMP28]], i64 [[IDXPROM11]]
9391 // CHECK9-NEXT:    store i32 [[ADD10]], i32* [[ARRAYIDX12]], align 4, !llvm.access.group !101
9392 // CHECK9-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
9393 // CHECK9:       omp.body.continue:
9394 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
9395 // CHECK9:       omp.inner.for.inc:
9396 // CHECK9-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
9397 // CHECK9-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP30]], 1
9398 // CHECK9-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !101
9399 // CHECK9-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP102:![0-9]+]]
9400 // CHECK9:       omp.inner.for.end:
9401 // CHECK9-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
9402 // CHECK9:       omp.dispatch.inc:
9403 // CHECK9-NEXT:    br label [[OMP_DISPATCH_COND]]
9404 // CHECK9:       omp.dispatch.end:
9405 // CHECK9-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
9406 // CHECK9-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
9407 // CHECK9-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9408 // CHECK9:       .omp.final.then:
9409 // CHECK9-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9410 // CHECK9-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP33]], 0
9411 // CHECK9-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
9412 // CHECK9-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
9413 // CHECK9-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
9414 // CHECK9-NEXT:    store i32 [[ADD17]], i32* [[I6]], align 4
9415 // CHECK9-NEXT:    br label [[DOTOMP_FINAL_DONE]]
9416 // CHECK9:       .omp.final.done:
9417 // CHECK9-NEXT:    br label [[OMP_PRECOND_END]]
9418 // CHECK9:       omp.precond.end:
9419 // CHECK9-NEXT:    ret void
9420 //
9421 //
9422 // CHECK9-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
9423 // CHECK9-SAME: () #[[ATTR4:[0-9]+]] {
9424 // CHECK9-NEXT:  entry:
9425 // CHECK9-NEXT:    call void @__tgt_register_requires(i64 1)
9426 // CHECK9-NEXT:    ret void
9427 //
9428 //
9429 // CHECK11-LABEL: define {{[^@]+}}@main
9430 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
9431 // CHECK11-NEXT:  entry:
9432 // CHECK11-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
9433 // CHECK11-NEXT:    [[A:%.*]] = alloca double*, align 4
9434 // CHECK11-NEXT:    [[B:%.*]] = alloca double*, align 4
9435 // CHECK11-NEXT:    [[C:%.*]] = alloca double*, align 4
9436 // CHECK11-NEXT:    [[N:%.*]] = alloca i32, align 4
9437 // CHECK11-NEXT:    [[CH:%.*]] = alloca i32, align 4
9438 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
9439 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
9440 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
9441 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
9442 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9443 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9444 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9445 // CHECK11-NEXT:    [[N_CASTED3:%.*]] = alloca i32, align 4
9446 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [4 x i8*], align 4
9447 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [4 x i8*], align 4
9448 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [4 x i8*], align 4
9449 // CHECK11-NEXT:    [[_TMP7:%.*]] = alloca i32, align 4
9450 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4
9451 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
9452 // CHECK11-NEXT:    [[CH_CASTED:%.*]] = alloca i32, align 4
9453 // CHECK11-NEXT:    [[N_CASTED16:%.*]] = alloca i32, align 4
9454 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [5 x i8*], align 4
9455 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [5 x i8*], align 4
9456 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [5 x i8*], align 4
9457 // CHECK11-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
9458 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
9459 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4
9460 // CHECK11-NEXT:    [[N_CASTED29:%.*]] = alloca i32, align 4
9461 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [4 x i8*], align 4
9462 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS31:%.*]] = alloca [4 x i8*], align 4
9463 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [4 x i8*], align 4
9464 // CHECK11-NEXT:    [[_TMP33:%.*]] = alloca i32, align 4
9465 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4
9466 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4
9467 // CHECK11-NEXT:    [[CH_CASTED42:%.*]] = alloca i32, align 4
9468 // CHECK11-NEXT:    [[N_CASTED43:%.*]] = alloca i32, align 4
9469 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [5 x i8*], align 4
9470 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS45:%.*]] = alloca [5 x i8*], align 4
9471 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [5 x i8*], align 4
9472 // CHECK11-NEXT:    [[_TMP47:%.*]] = alloca i32, align 4
9473 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4
9474 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4
9475 // CHECK11-NEXT:    [[N_CASTED56:%.*]] = alloca i32, align 4
9476 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS57:%.*]] = alloca [4 x i8*], align 4
9477 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS58:%.*]] = alloca [4 x i8*], align 4
9478 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS59:%.*]] = alloca [4 x i8*], align 4
9479 // CHECK11-NEXT:    [[_TMP60:%.*]] = alloca i32, align 4
9480 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
9481 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_62:%.*]] = alloca i32, align 4
9482 // CHECK11-NEXT:    [[CH_CASTED69:%.*]] = alloca i32, align 4
9483 // CHECK11-NEXT:    [[N_CASTED70:%.*]] = alloca i32, align 4
9484 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS71:%.*]] = alloca [5 x i8*], align 4
9485 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS72:%.*]] = alloca [5 x i8*], align 4
9486 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS73:%.*]] = alloca [5 x i8*], align 4
9487 // CHECK11-NEXT:    [[_TMP74:%.*]] = alloca i32, align 4
9488 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_75:%.*]] = alloca i32, align 4
9489 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
9490 // CHECK11-NEXT:    store i32 0, i32* [[RETVAL]], align 4
9491 // CHECK11-NEXT:    store i32 10000, i32* [[N]], align 4
9492 // CHECK11-NEXT:    store i32 100, i32* [[CH]], align 4
9493 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
9494 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[N_CASTED]], align 4
9495 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_CASTED]], align 4
9496 // CHECK11-NEXT:    [[TMP2:%.*]] = load double*, double** [[A]], align 4
9497 // CHECK11-NEXT:    [[TMP3:%.*]] = load double*, double** [[B]], align 4
9498 // CHECK11-NEXT:    [[TMP4:%.*]] = load double*, double** [[C]], align 4
9499 // CHECK11-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
9500 // CHECK11-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
9501 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
9502 // CHECK11-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
9503 // CHECK11-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
9504 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
9505 // CHECK11-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
9506 // CHECK11-NEXT:    store i8* null, i8** [[TMP9]], align 4
9507 // CHECK11-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
9508 // CHECK11-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to double**
9509 // CHECK11-NEXT:    store double* [[TMP2]], double** [[TMP11]], align 4
9510 // CHECK11-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
9511 // CHECK11-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
9512 // CHECK11-NEXT:    store double* [[TMP2]], double** [[TMP13]], align 4
9513 // CHECK11-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
9514 // CHECK11-NEXT:    store i8* null, i8** [[TMP14]], align 4
9515 // CHECK11-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
9516 // CHECK11-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double**
9517 // CHECK11-NEXT:    store double* [[TMP3]], double** [[TMP16]], align 4
9518 // CHECK11-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
9519 // CHECK11-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to double**
9520 // CHECK11-NEXT:    store double* [[TMP3]], double** [[TMP18]], align 4
9521 // CHECK11-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
9522 // CHECK11-NEXT:    store i8* null, i8** [[TMP19]], align 4
9523 // CHECK11-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
9524 // CHECK11-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to double**
9525 // CHECK11-NEXT:    store double* [[TMP4]], double** [[TMP21]], align 4
9526 // CHECK11-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
9527 // CHECK11-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to double**
9528 // CHECK11-NEXT:    store double* [[TMP4]], double** [[TMP23]], align 4
9529 // CHECK11-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
9530 // CHECK11-NEXT:    store i8* null, i8** [[TMP24]], align 4
9531 // CHECK11-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
9532 // CHECK11-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
9533 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
9534 // CHECK11-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
9535 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9536 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
9537 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9538 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9539 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9540 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9541 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
9542 // CHECK11-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
9543 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
9544 // CHECK11-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
9545 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
9546 // CHECK11-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
9547 // CHECK11:       omp_offload.failed:
9548 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368(i32 [[TMP1]], double* [[TMP2]], double* [[TMP3]], double* [[TMP4]]) #[[ATTR2:[0-9]+]]
9549 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT]]
9550 // CHECK11:       omp_offload.cont:
9551 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
9552 // CHECK11-NEXT:    store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
9553 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
9554 // CHECK11-NEXT:    [[TMP35:%.*]] = load double*, double** [[A]], align 4
9555 // CHECK11-NEXT:    [[TMP36:%.*]] = load double*, double** [[B]], align 4
9556 // CHECK11-NEXT:    [[TMP37:%.*]] = load double*, double** [[C]], align 4
9557 // CHECK11-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
9558 // CHECK11-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32*
9559 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[TMP39]], align 4
9560 // CHECK11-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
9561 // CHECK11-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
9562 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[TMP41]], align 4
9563 // CHECK11-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
9564 // CHECK11-NEXT:    store i8* null, i8** [[TMP42]], align 4
9565 // CHECK11-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
9566 // CHECK11-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to double**
9567 // CHECK11-NEXT:    store double* [[TMP35]], double** [[TMP44]], align 4
9568 // CHECK11-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
9569 // CHECK11-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double**
9570 // CHECK11-NEXT:    store double* [[TMP35]], double** [[TMP46]], align 4
9571 // CHECK11-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
9572 // CHECK11-NEXT:    store i8* null, i8** [[TMP47]], align 4
9573 // CHECK11-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
9574 // CHECK11-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to double**
9575 // CHECK11-NEXT:    store double* [[TMP36]], double** [[TMP49]], align 4
9576 // CHECK11-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
9577 // CHECK11-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to double**
9578 // CHECK11-NEXT:    store double* [[TMP36]], double** [[TMP51]], align 4
9579 // CHECK11-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
9580 // CHECK11-NEXT:    store i8* null, i8** [[TMP52]], align 4
9581 // CHECK11-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 3
9582 // CHECK11-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to double**
9583 // CHECK11-NEXT:    store double* [[TMP37]], double** [[TMP54]], align 4
9584 // CHECK11-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 3
9585 // CHECK11-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to double**
9586 // CHECK11-NEXT:    store double* [[TMP37]], double** [[TMP56]], align 4
9587 // CHECK11-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 3
9588 // CHECK11-NEXT:    store i8* null, i8** [[TMP57]], align 4
9589 // CHECK11-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
9590 // CHECK11-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
9591 // CHECK11-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
9592 // CHECK11-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_8]], align 4
9593 // CHECK11-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4
9594 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP61]], 0
9595 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
9596 // CHECK11-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1
9597 // CHECK11-NEXT:    store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4
9598 // CHECK11-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
9599 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP62]], 1
9600 // CHECK11-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD13]] to i64
9601 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
9602 // CHECK11-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
9603 // CHECK11-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
9604 // CHECK11-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
9605 // CHECK11:       omp_offload.failed14:
9606 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407(i32 [[TMP34]], double* [[TMP35]], double* [[TMP36]], double* [[TMP37]]) #[[ATTR2]]
9607 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
9608 // CHECK11:       omp_offload.cont15:
9609 // CHECK11-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
9610 // CHECK11-NEXT:    store i32 [[TMP66]], i32* [[CH_CASTED]], align 4
9611 // CHECK11-NEXT:    [[TMP67:%.*]] = load i32, i32* [[CH_CASTED]], align 4
9612 // CHECK11-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
9613 // CHECK11-NEXT:    store i32 [[TMP68]], i32* [[N_CASTED16]], align 4
9614 // CHECK11-NEXT:    [[TMP69:%.*]] = load i32, i32* [[N_CASTED16]], align 4
9615 // CHECK11-NEXT:    [[TMP70:%.*]] = load double*, double** [[A]], align 4
9616 // CHECK11-NEXT:    [[TMP71:%.*]] = load double*, double** [[B]], align 4
9617 // CHECK11-NEXT:    [[TMP72:%.*]] = load double*, double** [[C]], align 4
9618 // CHECK11-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
9619 // CHECK11-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
9620 // CHECK11-NEXT:    store i32 [[TMP67]], i32* [[TMP74]], align 4
9621 // CHECK11-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
9622 // CHECK11-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32*
9623 // CHECK11-NEXT:    store i32 [[TMP67]], i32* [[TMP76]], align 4
9624 // CHECK11-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
9625 // CHECK11-NEXT:    store i8* null, i8** [[TMP77]], align 4
9626 // CHECK11-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1
9627 // CHECK11-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
9628 // CHECK11-NEXT:    store i32 [[TMP69]], i32* [[TMP79]], align 4
9629 // CHECK11-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1
9630 // CHECK11-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
9631 // CHECK11-NEXT:    store i32 [[TMP69]], i32* [[TMP81]], align 4
9632 // CHECK11-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1
9633 // CHECK11-NEXT:    store i8* null, i8** [[TMP82]], align 4
9634 // CHECK11-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2
9635 // CHECK11-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to double**
9636 // CHECK11-NEXT:    store double* [[TMP70]], double** [[TMP84]], align 4
9637 // CHECK11-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2
9638 // CHECK11-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to double**
9639 // CHECK11-NEXT:    store double* [[TMP70]], double** [[TMP86]], align 4
9640 // CHECK11-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2
9641 // CHECK11-NEXT:    store i8* null, i8** [[TMP87]], align 4
9642 // CHECK11-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3
9643 // CHECK11-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to double**
9644 // CHECK11-NEXT:    store double* [[TMP71]], double** [[TMP89]], align 4
9645 // CHECK11-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3
9646 // CHECK11-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to double**
9647 // CHECK11-NEXT:    store double* [[TMP71]], double** [[TMP91]], align 4
9648 // CHECK11-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3
9649 // CHECK11-NEXT:    store i8* null, i8** [[TMP92]], align 4
9650 // CHECK11-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 4
9651 // CHECK11-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double**
9652 // CHECK11-NEXT:    store double* [[TMP72]], double** [[TMP94]], align 4
9653 // CHECK11-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 4
9654 // CHECK11-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to double**
9655 // CHECK11-NEXT:    store double* [[TMP72]], double** [[TMP96]], align 4
9656 // CHECK11-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 4
9657 // CHECK11-NEXT:    store i8* null, i8** [[TMP97]], align 4
9658 // CHECK11-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
9659 // CHECK11-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
9660 // CHECK11-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
9661 // CHECK11-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_21]], align 4
9662 // CHECK11-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4
9663 // CHECK11-NEXT:    [[SUB23:%.*]] = sub nsw i32 [[TMP101]], 0
9664 // CHECK11-NEXT:    [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
9665 // CHECK11-NEXT:    [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1
9666 // CHECK11-NEXT:    store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4
9667 // CHECK11-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4
9668 // CHECK11-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP102]], 1
9669 // CHECK11-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD26]] to i64
9670 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
9671 // CHECK11-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
9672 // CHECK11-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
9673 // CHECK11-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]]
9674 // CHECK11:       omp_offload.failed27:
9675 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446(i32 [[TMP67]], i32 [[TMP69]], double* [[TMP70]], double* [[TMP71]], double* [[TMP72]]) #[[ATTR2]]
9676 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT28]]
9677 // CHECK11:       omp_offload.cont28:
9678 // CHECK11-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
9679 // CHECK11-NEXT:    store i32 [[TMP106]], i32* [[N_CASTED29]], align 4
9680 // CHECK11-NEXT:    [[TMP107:%.*]] = load i32, i32* [[N_CASTED29]], align 4
9681 // CHECK11-NEXT:    [[TMP108:%.*]] = load double*, double** [[A]], align 4
9682 // CHECK11-NEXT:    [[TMP109:%.*]] = load double*, double** [[B]], align 4
9683 // CHECK11-NEXT:    [[TMP110:%.*]] = load double*, double** [[C]], align 4
9684 // CHECK11-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
9685 // CHECK11-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32*
9686 // CHECK11-NEXT:    store i32 [[TMP107]], i32* [[TMP112]], align 4
9687 // CHECK11-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
9688 // CHECK11-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32*
9689 // CHECK11-NEXT:    store i32 [[TMP107]], i32* [[TMP114]], align 4
9690 // CHECK11-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0
9691 // CHECK11-NEXT:    store i8* null, i8** [[TMP115]], align 4
9692 // CHECK11-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1
9693 // CHECK11-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to double**
9694 // CHECK11-NEXT:    store double* [[TMP108]], double** [[TMP117]], align 4
9695 // CHECK11-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1
9696 // CHECK11-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to double**
9697 // CHECK11-NEXT:    store double* [[TMP108]], double** [[TMP119]], align 4
9698 // CHECK11-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1
9699 // CHECK11-NEXT:    store i8* null, i8** [[TMP120]], align 4
9700 // CHECK11-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2
9701 // CHECK11-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to double**
9702 // CHECK11-NEXT:    store double* [[TMP109]], double** [[TMP122]], align 4
9703 // CHECK11-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2
9704 // CHECK11-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to double**
9705 // CHECK11-NEXT:    store double* [[TMP109]], double** [[TMP124]], align 4
9706 // CHECK11-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2
9707 // CHECK11-NEXT:    store i8* null, i8** [[TMP125]], align 4
9708 // CHECK11-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 3
9709 // CHECK11-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to double**
9710 // CHECK11-NEXT:    store double* [[TMP110]], double** [[TMP127]], align 4
9711 // CHECK11-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 3
9712 // CHECK11-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to double**
9713 // CHECK11-NEXT:    store double* [[TMP110]], double** [[TMP129]], align 4
9714 // CHECK11-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 3
9715 // CHECK11-NEXT:    store i8* null, i8** [[TMP130]], align 4
9716 // CHECK11-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
9717 // CHECK11-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
9718 // CHECK11-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
9719 // CHECK11-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_34]], align 4
9720 // CHECK11-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4
9721 // CHECK11-NEXT:    [[SUB36:%.*]] = sub nsw i32 [[TMP134]], 0
9722 // CHECK11-NEXT:    [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1
9723 // CHECK11-NEXT:    [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1
9724 // CHECK11-NEXT:    store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4
9725 // CHECK11-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4
9726 // CHECK11-NEXT:    [[ADD39:%.*]] = add nsw i32 [[TMP135]], 1
9727 // CHECK11-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD39]] to i64
9728 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
9729 // CHECK11-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
9730 // CHECK11-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
9731 // CHECK11-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]]
9732 // CHECK11:       omp_offload.failed40:
9733 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477(i32 [[TMP107]], double* [[TMP108]], double* [[TMP109]], double* [[TMP110]]) #[[ATTR2]]
9734 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT41]]
9735 // CHECK11:       omp_offload.cont41:
9736 // CHECK11-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
9737 // CHECK11-NEXT:    store i32 [[TMP139]], i32* [[CH_CASTED42]], align 4
9738 // CHECK11-NEXT:    [[TMP140:%.*]] = load i32, i32* [[CH_CASTED42]], align 4
9739 // CHECK11-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
9740 // CHECK11-NEXT:    store i32 [[TMP141]], i32* [[N_CASTED43]], align 4
9741 // CHECK11-NEXT:    [[TMP142:%.*]] = load i32, i32* [[N_CASTED43]], align 4
9742 // CHECK11-NEXT:    [[TMP143:%.*]] = load double*, double** [[A]], align 4
9743 // CHECK11-NEXT:    [[TMP144:%.*]] = load double*, double** [[B]], align 4
9744 // CHECK11-NEXT:    [[TMP145:%.*]] = load double*, double** [[C]], align 4
9745 // CHECK11-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
9746 // CHECK11-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32*
9747 // CHECK11-NEXT:    store i32 [[TMP140]], i32* [[TMP147]], align 4
9748 // CHECK11-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
9749 // CHECK11-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
9750 // CHECK11-NEXT:    store i32 [[TMP140]], i32* [[TMP149]], align 4
9751 // CHECK11-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0
9752 // CHECK11-NEXT:    store i8* null, i8** [[TMP150]], align 4
9753 // CHECK11-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1
9754 // CHECK11-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i32*
9755 // CHECK11-NEXT:    store i32 [[TMP142]], i32* [[TMP152]], align 4
9756 // CHECK11-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1
9757 // CHECK11-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i32*
9758 // CHECK11-NEXT:    store i32 [[TMP142]], i32* [[TMP154]], align 4
9759 // CHECK11-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1
9760 // CHECK11-NEXT:    store i8* null, i8** [[TMP155]], align 4
9761 // CHECK11-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2
9762 // CHECK11-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to double**
9763 // CHECK11-NEXT:    store double* [[TMP143]], double** [[TMP157]], align 4
9764 // CHECK11-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2
9765 // CHECK11-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to double**
9766 // CHECK11-NEXT:    store double* [[TMP143]], double** [[TMP159]], align 4
9767 // CHECK11-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2
9768 // CHECK11-NEXT:    store i8* null, i8** [[TMP160]], align 4
9769 // CHECK11-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3
9770 // CHECK11-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to double**
9771 // CHECK11-NEXT:    store double* [[TMP144]], double** [[TMP162]], align 4
9772 // CHECK11-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3
9773 // CHECK11-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to double**
9774 // CHECK11-NEXT:    store double* [[TMP144]], double** [[TMP164]], align 4
9775 // CHECK11-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3
9776 // CHECK11-NEXT:    store i8* null, i8** [[TMP165]], align 4
9777 // CHECK11-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 4
9778 // CHECK11-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to double**
9779 // CHECK11-NEXT:    store double* [[TMP145]], double** [[TMP167]], align 4
9780 // CHECK11-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 4
9781 // CHECK11-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to double**
9782 // CHECK11-NEXT:    store double* [[TMP145]], double** [[TMP169]], align 4
9783 // CHECK11-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 4
9784 // CHECK11-NEXT:    store i8* null, i8** [[TMP170]], align 4
9785 // CHECK11-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
9786 // CHECK11-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
9787 // CHECK11-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
9788 // CHECK11-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_48]], align 4
9789 // CHECK11-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4
9790 // CHECK11-NEXT:    [[SUB50:%.*]] = sub nsw i32 [[TMP174]], 0
9791 // CHECK11-NEXT:    [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1
9792 // CHECK11-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1
9793 // CHECK11-NEXT:    store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4
9794 // CHECK11-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4
9795 // CHECK11-NEXT:    [[ADD53:%.*]] = add nsw i32 [[TMP175]], 1
9796 // CHECK11-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD53]] to i64
9797 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
9798 // CHECK11-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
9799 // CHECK11-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
9800 // CHECK11-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]]
9801 // CHECK11:       omp_offload.failed54:
9802 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505(i32 [[TMP140]], i32 [[TMP142]], double* [[TMP143]], double* [[TMP144]], double* [[TMP145]]) #[[ATTR2]]
9803 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT55]]
9804 // CHECK11:       omp_offload.cont55:
9805 // CHECK11-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
9806 // CHECK11-NEXT:    store i32 [[TMP179]], i32* [[N_CASTED56]], align 4
9807 // CHECK11-NEXT:    [[TMP180:%.*]] = load i32, i32* [[N_CASTED56]], align 4
9808 // CHECK11-NEXT:    [[TMP181:%.*]] = load double*, double** [[A]], align 4
9809 // CHECK11-NEXT:    [[TMP182:%.*]] = load double*, double** [[B]], align 4
9810 // CHECK11-NEXT:    [[TMP183:%.*]] = load double*, double** [[C]], align 4
9811 // CHECK11-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
9812 // CHECK11-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i32*
9813 // CHECK11-NEXT:    store i32 [[TMP180]], i32* [[TMP185]], align 4
9814 // CHECK11-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
9815 // CHECK11-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i32*
9816 // CHECK11-NEXT:    store i32 [[TMP180]], i32* [[TMP187]], align 4
9817 // CHECK11-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 0
9818 // CHECK11-NEXT:    store i8* null, i8** [[TMP188]], align 4
9819 // CHECK11-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 1
9820 // CHECK11-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to double**
9821 // CHECK11-NEXT:    store double* [[TMP181]], double** [[TMP190]], align 4
9822 // CHECK11-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 1
9823 // CHECK11-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to double**
9824 // CHECK11-NEXT:    store double* [[TMP181]], double** [[TMP192]], align 4
9825 // CHECK11-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 1
9826 // CHECK11-NEXT:    store i8* null, i8** [[TMP193]], align 4
9827 // CHECK11-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 2
9828 // CHECK11-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to double**
9829 // CHECK11-NEXT:    store double* [[TMP182]], double** [[TMP195]], align 4
9830 // CHECK11-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 2
9831 // CHECK11-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to double**
9832 // CHECK11-NEXT:    store double* [[TMP182]], double** [[TMP197]], align 4
9833 // CHECK11-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 2
9834 // CHECK11-NEXT:    store i8* null, i8** [[TMP198]], align 4
9835 // CHECK11-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 3
9836 // CHECK11-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to double**
9837 // CHECK11-NEXT:    store double* [[TMP183]], double** [[TMP200]], align 4
9838 // CHECK11-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 3
9839 // CHECK11-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to double**
9840 // CHECK11-NEXT:    store double* [[TMP183]], double** [[TMP202]], align 4
9841 // CHECK11-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 3
9842 // CHECK11-NEXT:    store i8* null, i8** [[TMP203]], align 4
9843 // CHECK11-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
9844 // CHECK11-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
9845 // CHECK11-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
9846 // CHECK11-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_61]], align 4
9847 // CHECK11-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
9848 // CHECK11-NEXT:    [[SUB63:%.*]] = sub nsw i32 [[TMP207]], 0
9849 // CHECK11-NEXT:    [[DIV64:%.*]] = sdiv i32 [[SUB63]], 1
9850 // CHECK11-NEXT:    [[SUB65:%.*]] = sub nsw i32 [[DIV64]], 1
9851 // CHECK11-NEXT:    store i32 [[SUB65]], i32* [[DOTCAPTURE_EXPR_62]], align 4
9852 // CHECK11-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_62]], align 4
9853 // CHECK11-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP208]], 1
9854 // CHECK11-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD66]] to i64
9855 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
9856 // CHECK11-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
9857 // CHECK11-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
9858 // CHECK11-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED67:%.*]], label [[OMP_OFFLOAD_CONT68:%.*]]
9859 // CHECK11:       omp_offload.failed67:
9860 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535(i32 [[TMP180]], double* [[TMP181]], double* [[TMP182]], double* [[TMP183]]) #[[ATTR2]]
9861 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT68]]
9862 // CHECK11:       omp_offload.cont68:
9863 // CHECK11-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
9864 // CHECK11-NEXT:    store i32 [[TMP212]], i32* [[CH_CASTED69]], align 4
9865 // CHECK11-NEXT:    [[TMP213:%.*]] = load i32, i32* [[CH_CASTED69]], align 4
9866 // CHECK11-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
9867 // CHECK11-NEXT:    store i32 [[TMP214]], i32* [[N_CASTED70]], align 4
9868 // CHECK11-NEXT:    [[TMP215:%.*]] = load i32, i32* [[N_CASTED70]], align 4
9869 // CHECK11-NEXT:    [[TMP216:%.*]] = load double*, double** [[A]], align 4
9870 // CHECK11-NEXT:    [[TMP217:%.*]] = load double*, double** [[B]], align 4
9871 // CHECK11-NEXT:    [[TMP218:%.*]] = load double*, double** [[C]], align 4
9872 // CHECK11-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
9873 // CHECK11-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i32*
9874 // CHECK11-NEXT:    store i32 [[TMP213]], i32* [[TMP220]], align 4
9875 // CHECK11-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
9876 // CHECK11-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i32*
9877 // CHECK11-NEXT:    store i32 [[TMP213]], i32* [[TMP222]], align 4
9878 // CHECK11-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 0
9879 // CHECK11-NEXT:    store i8* null, i8** [[TMP223]], align 4
9880 // CHECK11-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 1
9881 // CHECK11-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i32*
9882 // CHECK11-NEXT:    store i32 [[TMP215]], i32* [[TMP225]], align 4
9883 // CHECK11-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 1
9884 // CHECK11-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i32*
9885 // CHECK11-NEXT:    store i32 [[TMP215]], i32* [[TMP227]], align 4
9886 // CHECK11-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 1
9887 // CHECK11-NEXT:    store i8* null, i8** [[TMP228]], align 4
9888 // CHECK11-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 2
9889 // CHECK11-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to double**
9890 // CHECK11-NEXT:    store double* [[TMP216]], double** [[TMP230]], align 4
9891 // CHECK11-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 2
9892 // CHECK11-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to double**
9893 // CHECK11-NEXT:    store double* [[TMP216]], double** [[TMP232]], align 4
9894 // CHECK11-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 2
9895 // CHECK11-NEXT:    store i8* null, i8** [[TMP233]], align 4
9896 // CHECK11-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 3
9897 // CHECK11-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to double**
9898 // CHECK11-NEXT:    store double* [[TMP217]], double** [[TMP235]], align 4
9899 // CHECK11-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 3
9900 // CHECK11-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to double**
9901 // CHECK11-NEXT:    store double* [[TMP217]], double** [[TMP237]], align 4
9902 // CHECK11-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 3
9903 // CHECK11-NEXT:    store i8* null, i8** [[TMP238]], align 4
9904 // CHECK11-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 4
9905 // CHECK11-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to double**
9906 // CHECK11-NEXT:    store double* [[TMP218]], double** [[TMP240]], align 4
9907 // CHECK11-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 4
9908 // CHECK11-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to double**
9909 // CHECK11-NEXT:    store double* [[TMP218]], double** [[TMP242]], align 4
9910 // CHECK11-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 4
9911 // CHECK11-NEXT:    store i8* null, i8** [[TMP243]], align 4
9912 // CHECK11-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
9913 // CHECK11-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
9914 // CHECK11-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
9915 // CHECK11-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_75]], align 4
9916 // CHECK11-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_75]], align 4
9917 // CHECK11-NEXT:    [[SUB77:%.*]] = sub nsw i32 [[TMP247]], 0
9918 // CHECK11-NEXT:    [[DIV78:%.*]] = sdiv i32 [[SUB77]], 1
9919 // CHECK11-NEXT:    [[SUB79:%.*]] = sub nsw i32 [[DIV78]], 1
9920 // CHECK11-NEXT:    store i32 [[SUB79]], i32* [[DOTCAPTURE_EXPR_76]], align 4
9921 // CHECK11-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
9922 // CHECK11-NEXT:    [[ADD80:%.*]] = add nsw i32 [[TMP248]], 1
9923 // CHECK11-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD80]] to i64
9924 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
9925 // CHECK11-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
9926 // CHECK11-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
9927 // CHECK11-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED81:%.*]], label [[OMP_OFFLOAD_CONT82:%.*]]
9928 // CHECK11:       omp_offload.failed81:
9929 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561(i32 [[TMP213]], i32 [[TMP215]], double* [[TMP216]], double* [[TMP217]], double* [[TMP218]]) #[[ATTR2]]
9930 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT82]]
9931 // CHECK11:       omp_offload.cont82:
9932 // CHECK11-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
9933 // CHECK11-NEXT:    ret i32 [[CALL]]
9934 //
9935 //
9936 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l368
9937 // CHECK11-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1:[0-9]+]] {
9938 // CHECK11-NEXT:  entry:
9939 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
9940 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
9941 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
9942 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
9943 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
9944 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
9945 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
9946 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
9947 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
9948 // CHECK11-NEXT:    ret void
9949 //
9950 //
9951 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined.
9952 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
9953 // CHECK11-NEXT:  entry:
9954 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
9955 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
9956 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
9957 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
9958 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
9959 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
9960 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
9961 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
9962 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9963 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9964 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
9965 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9966 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9967 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9968 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9969 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
9970 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
9971 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
9972 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
9973 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
9974 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
9975 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
9976 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
9977 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
9978 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
9979 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
9980 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
9981 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
9982 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9983 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
9984 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9985 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9986 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
9987 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
9988 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
9989 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
9990 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9991 // CHECK11:       omp.precond.then:
9992 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
9993 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
9994 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
9995 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
9996 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
9997 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
9998 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
9999 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10000 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10001 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10002 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
10003 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10004 // CHECK11:       cond.true:
10005 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10006 // CHECK11-NEXT:    br label [[COND_END:%.*]]
10007 // CHECK11:       cond.false:
10008 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10009 // CHECK11-NEXT:    br label [[COND_END]]
10010 // CHECK11:       cond.end:
10011 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
10012 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10013 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10014 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
10015 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10016 // CHECK11:       omp.inner.for.cond:
10017 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
10018 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
10019 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
10020 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10021 // CHECK11:       omp.inner.for.body:
10022 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !18
10023 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !18
10024 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !18
10025 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10026 // CHECK11:       omp.inner.for.inc:
10027 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
10028 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !18
10029 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
10030 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
10031 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
10032 // CHECK11:       omp.inner.for.end:
10033 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10034 // CHECK11:       omp.loop.exit:
10035 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10036 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
10037 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
10038 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10039 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
10040 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10041 // CHECK11:       .omp.final.then:
10042 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10043 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
10044 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
10045 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
10046 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
10047 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
10048 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10049 // CHECK11:       .omp.final.done:
10050 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
10051 // CHECK11:       omp.precond.end:
10052 // CHECK11-NEXT:    ret void
10053 //
10054 //
10055 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1
10056 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
10057 // CHECK11-NEXT:  entry:
10058 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10059 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10060 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
10061 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
10062 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
10063 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
10064 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
10065 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
10066 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10067 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10068 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10069 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10070 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
10071 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10072 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10073 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10074 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10075 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
10076 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10077 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10078 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
10079 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
10080 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
10081 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
10082 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
10083 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
10084 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
10085 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
10086 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
10087 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
10088 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10089 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10090 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10091 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10092 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10093 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10094 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10095 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
10096 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10097 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10098 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10099 // CHECK11:       omp.precond.then:
10100 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10101 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10102 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10103 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
10104 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
10105 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
10106 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
10107 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10108 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10109 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10110 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10111 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10112 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10113 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10114 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
10115 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10116 // CHECK11:       cond.true:
10117 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10118 // CHECK11-NEXT:    br label [[COND_END:%.*]]
10119 // CHECK11:       cond.false:
10120 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10121 // CHECK11-NEXT:    br label [[COND_END]]
10122 // CHECK11:       cond.end:
10123 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
10124 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10125 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10126 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10127 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10128 // CHECK11:       omp.inner.for.cond:
10129 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
10130 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
10131 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
10132 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10133 // CHECK11:       omp.inner.for.body:
10134 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
10135 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
10136 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10137 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !22
10138 // CHECK11-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !22
10139 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
10140 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
10141 // CHECK11-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !22
10142 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !22
10143 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
10144 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
10145 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !22
10146 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
10147 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !22
10148 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !22
10149 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
10150 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !22
10151 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10152 // CHECK11:       omp.body.continue:
10153 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10154 // CHECK11:       omp.inner.for.inc:
10155 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
10156 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
10157 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
10158 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
10159 // CHECK11:       omp.inner.for.end:
10160 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10161 // CHECK11:       omp.loop.exit:
10162 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10163 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
10164 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
10165 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10166 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
10167 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10168 // CHECK11:       .omp.final.then:
10169 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10170 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
10171 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
10172 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
10173 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
10174 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
10175 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10176 // CHECK11:       .omp.final.done:
10177 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
10178 // CHECK11:       omp.precond.end:
10179 // CHECK11-NEXT:    ret void
10180 //
10181 //
10182 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l407
10183 // CHECK11-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
10184 // CHECK11-NEXT:  entry:
10185 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
10186 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
10187 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
10188 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
10189 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
10190 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
10191 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
10192 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
10193 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
10194 // CHECK11-NEXT:    ret void
10195 //
10196 //
10197 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..2
10198 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
10199 // CHECK11-NEXT:  entry:
10200 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10201 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10202 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
10203 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
10204 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
10205 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
10206 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10207 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10208 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10209 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10210 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
10211 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10212 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10213 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10214 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10215 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
10216 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10217 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10218 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
10219 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
10220 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
10221 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
10222 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
10223 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
10224 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
10225 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
10226 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10227 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10228 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10229 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10230 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10231 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10232 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10233 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
10234 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10235 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10236 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10237 // CHECK11:       omp.precond.then:
10238 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10239 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10240 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
10241 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10242 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10243 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10244 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
10245 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10246 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10247 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10248 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
10249 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10250 // CHECK11:       cond.true:
10251 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10252 // CHECK11-NEXT:    br label [[COND_END:%.*]]
10253 // CHECK11:       cond.false:
10254 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10255 // CHECK11-NEXT:    br label [[COND_END]]
10256 // CHECK11:       cond.end:
10257 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
10258 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10259 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10260 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
10261 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10262 // CHECK11:       omp.inner.for.cond:
10263 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
10264 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !27
10265 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
10266 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10267 // CHECK11:       omp.inner.for.body:
10268 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !27
10269 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !27
10270 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !27
10271 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10272 // CHECK11:       omp.inner.for.inc:
10273 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
10274 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !27
10275 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
10276 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27
10277 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
10278 // CHECK11:       omp.inner.for.end:
10279 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10280 // CHECK11:       omp.loop.exit:
10281 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10282 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
10283 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
10284 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10285 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
10286 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10287 // CHECK11:       .omp.final.then:
10288 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10289 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
10290 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
10291 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
10292 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
10293 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
10294 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10295 // CHECK11:       .omp.final.done:
10296 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
10297 // CHECK11:       omp.precond.end:
10298 // CHECK11-NEXT:    ret void
10299 //
10300 //
10301 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3
10302 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
10303 // CHECK11-NEXT:  entry:
10304 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10305 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10306 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
10307 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
10308 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
10309 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
10310 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
10311 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
10312 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10313 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10314 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10315 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10316 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
10317 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10318 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10319 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10320 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10321 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
10322 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10323 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10324 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
10325 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
10326 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
10327 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
10328 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
10329 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
10330 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
10331 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
10332 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
10333 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
10334 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10335 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10336 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10337 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10338 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10339 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10340 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10341 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
10342 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10343 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10344 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10345 // CHECK11:       omp.precond.then:
10346 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10347 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10348 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10349 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
10350 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
10351 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
10352 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
10353 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10354 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10355 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10356 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10357 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10358 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10359 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10360 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
10361 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10362 // CHECK11:       cond.true:
10363 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10364 // CHECK11-NEXT:    br label [[COND_END:%.*]]
10365 // CHECK11:       cond.false:
10366 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10367 // CHECK11-NEXT:    br label [[COND_END]]
10368 // CHECK11:       cond.end:
10369 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
10370 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10371 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10372 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10373 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10374 // CHECK11:       omp.inner.for.cond:
10375 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
10376 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !30
10377 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
10378 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10379 // CHECK11:       omp.inner.for.body:
10380 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
10381 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
10382 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10383 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !30
10384 // CHECK11-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !30
10385 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
10386 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
10387 // CHECK11-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !30
10388 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !30
10389 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
10390 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
10391 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !30
10392 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
10393 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !30
10394 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !30
10395 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
10396 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !30
10397 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10398 // CHECK11:       omp.body.continue:
10399 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10400 // CHECK11:       omp.inner.for.inc:
10401 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
10402 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
10403 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30
10404 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
10405 // CHECK11:       omp.inner.for.end:
10406 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10407 // CHECK11:       omp.loop.exit:
10408 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10409 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
10410 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
10411 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10412 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
10413 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10414 // CHECK11:       .omp.final.then:
10415 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10416 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
10417 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
10418 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
10419 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
10420 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
10421 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10422 // CHECK11:       .omp.final.done:
10423 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
10424 // CHECK11:       omp.precond.end:
10425 // CHECK11-NEXT:    ret void
10426 //
10427 //
10428 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l446
10429 // CHECK11-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
10430 // CHECK11-NEXT:  entry:
10431 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
10432 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
10433 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
10434 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
10435 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
10436 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
10437 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
10438 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
10439 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
10440 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
10441 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
10442 // CHECK11-NEXT:    ret void
10443 //
10444 //
10445 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6
10446 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
10447 // CHECK11-NEXT:  entry:
10448 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10449 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10450 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
10451 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
10452 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
10453 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
10454 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
10455 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10456 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10457 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10458 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10459 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
10460 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10461 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10462 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10463 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10464 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
10465 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10466 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10467 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
10468 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
10469 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
10470 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
10471 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
10472 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
10473 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
10474 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
10475 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
10476 // CHECK11-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
10477 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
10478 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
10479 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10480 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
10481 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10482 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10483 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10484 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
10485 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10486 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
10487 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10488 // CHECK11:       omp.precond.then:
10489 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10490 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10491 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
10492 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10493 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10494 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
10495 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10496 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10497 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
10498 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10499 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10500 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
10501 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10502 // CHECK11:       cond.true:
10503 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10504 // CHECK11-NEXT:    br label [[COND_END:%.*]]
10505 // CHECK11:       cond.false:
10506 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10507 // CHECK11-NEXT:    br label [[COND_END]]
10508 // CHECK11:       cond.end:
10509 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
10510 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10511 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10512 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10513 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10514 // CHECK11:       omp.inner.for.cond:
10515 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
10516 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
10517 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
10518 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
10519 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10520 // CHECK11:       omp.inner.for.body:
10521 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
10522 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
10523 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]]), !llvm.access.group !33
10524 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10525 // CHECK11:       omp.inner.for.inc:
10526 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
10527 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
10528 // CHECK11-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
10529 // CHECK11-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
10530 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
10531 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
10532 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
10533 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
10534 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
10535 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !33
10536 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
10537 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
10538 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
10539 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
10540 // CHECK11-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
10541 // CHECK11-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
10542 // CHECK11:       cond.true10:
10543 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !33
10544 // CHECK11-NEXT:    br label [[COND_END12:%.*]]
10545 // CHECK11:       cond.false11:
10546 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
10547 // CHECK11-NEXT:    br label [[COND_END12]]
10548 // CHECK11:       cond.end12:
10549 // CHECK11-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
10550 // CHECK11-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !33
10551 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !33
10552 // CHECK11-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33
10553 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
10554 // CHECK11:       omp.inner.for.end:
10555 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10556 // CHECK11:       omp.loop.exit:
10557 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10558 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
10559 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
10560 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10561 // CHECK11-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
10562 // CHECK11-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10563 // CHECK11:       .omp.final.then:
10564 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10565 // CHECK11-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
10566 // CHECK11-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
10567 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
10568 // CHECK11-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
10569 // CHECK11-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
10570 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10571 // CHECK11:       .omp.final.done:
10572 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
10573 // CHECK11:       omp.precond.end:
10574 // CHECK11-NEXT:    ret void
10575 //
10576 //
10577 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7
10578 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
10579 // CHECK11-NEXT:  entry:
10580 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10581 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10582 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
10583 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
10584 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
10585 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
10586 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
10587 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
10588 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10589 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10590 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10591 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10592 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
10593 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10594 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10595 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10596 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10597 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
10598 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10599 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10600 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
10601 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
10602 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
10603 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
10604 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
10605 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
10606 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
10607 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
10608 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
10609 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
10610 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10611 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10612 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10613 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10614 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10615 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10616 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10617 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
10618 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10619 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10620 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10621 // CHECK11:       omp.precond.then:
10622 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10623 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10624 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10625 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
10626 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
10627 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
10628 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
10629 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10630 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10631 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10632 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10633 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10634 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10635 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10636 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
10637 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10638 // CHECK11:       cond.true:
10639 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10640 // CHECK11-NEXT:    br label [[COND_END:%.*]]
10641 // CHECK11:       cond.false:
10642 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10643 // CHECK11-NEXT:    br label [[COND_END]]
10644 // CHECK11:       cond.end:
10645 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
10646 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10647 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10648 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10649 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10650 // CHECK11:       omp.inner.for.cond:
10651 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
10652 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !36
10653 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
10654 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10655 // CHECK11:       omp.inner.for.body:
10656 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
10657 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
10658 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10659 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !36
10660 // CHECK11-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !36
10661 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
10662 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
10663 // CHECK11-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !36
10664 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !36
10665 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
10666 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
10667 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !36
10668 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
10669 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !36
10670 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !36
10671 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
10672 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !36
10673 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10674 // CHECK11:       omp.body.continue:
10675 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10676 // CHECK11:       omp.inner.for.inc:
10677 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
10678 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
10679 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36
10680 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
10681 // CHECK11:       omp.inner.for.end:
10682 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10683 // CHECK11:       omp.loop.exit:
10684 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10685 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
10686 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
10687 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10688 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
10689 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10690 // CHECK11:       .omp.final.then:
10691 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10692 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
10693 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
10694 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
10695 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
10696 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
10697 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10698 // CHECK11:       .omp.final.done:
10699 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
10700 // CHECK11:       omp.precond.end:
10701 // CHECK11-NEXT:    ret void
10702 //
10703 //
10704 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l477
10705 // CHECK11-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
10706 // CHECK11-NEXT:  entry:
10707 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
10708 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
10709 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
10710 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
10711 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
10712 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
10713 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
10714 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
10715 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
10716 // CHECK11-NEXT:    ret void
10717 //
10718 //
10719 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10
10720 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
10721 // CHECK11-NEXT:  entry:
10722 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10723 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10724 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
10725 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
10726 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
10727 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
10728 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10729 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10730 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10731 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10732 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
10733 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10734 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10735 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10736 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10737 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
10738 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10739 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10740 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
10741 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
10742 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
10743 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
10744 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
10745 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
10746 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
10747 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
10748 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10749 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10750 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10751 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10752 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10753 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10754 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10755 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
10756 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10757 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10758 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10759 // CHECK11:       omp.precond.then:
10760 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
10761 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10762 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
10763 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10764 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10765 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10766 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
10767 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10768 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10769 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10770 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
10771 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10772 // CHECK11:       cond.true:
10773 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10774 // CHECK11-NEXT:    br label [[COND_END:%.*]]
10775 // CHECK11:       cond.false:
10776 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
10777 // CHECK11-NEXT:    br label [[COND_END]]
10778 // CHECK11:       cond.end:
10779 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
10780 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
10781 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
10782 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
10783 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10784 // CHECK11:       omp.inner.for.cond:
10785 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
10786 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39
10787 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
10788 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10789 // CHECK11:       omp.inner.for.body:
10790 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !39
10791 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39
10792 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !39
10793 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10794 // CHECK11:       omp.inner.for.inc:
10795 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
10796 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !39
10797 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
10798 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39
10799 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
10800 // CHECK11:       omp.inner.for.end:
10801 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10802 // CHECK11:       omp.loop.exit:
10803 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10804 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
10805 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
10806 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10807 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
10808 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10809 // CHECK11:       .omp.final.then:
10810 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10811 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
10812 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
10813 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
10814 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
10815 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
10816 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10817 // CHECK11:       .omp.final.done:
10818 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
10819 // CHECK11:       omp.precond.end:
10820 // CHECK11-NEXT:    ret void
10821 //
10822 //
10823 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..11
10824 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
10825 // CHECK11-NEXT:  entry:
10826 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10827 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10828 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
10829 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
10830 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
10831 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
10832 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
10833 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
10834 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10835 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10836 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10837 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10838 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
10839 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
10840 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
10841 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10842 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10843 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
10844 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10845 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10846 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
10847 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
10848 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
10849 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
10850 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
10851 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
10852 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
10853 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
10854 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
10855 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
10856 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
10857 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
10858 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10859 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
10860 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10861 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10862 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
10863 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
10864 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10865 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
10866 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
10867 // CHECK11:       omp.precond.then:
10868 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
10869 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10870 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
10871 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
10872 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
10873 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
10874 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
10875 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
10876 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
10877 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10878 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
10879 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
10880 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10881 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10882 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
10883 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10884 // CHECK11:       cond.true:
10885 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
10886 // CHECK11-NEXT:    br label [[COND_END:%.*]]
10887 // CHECK11:       cond.false:
10888 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
10889 // CHECK11-NEXT:    br label [[COND_END]]
10890 // CHECK11:       cond.end:
10891 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
10892 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
10893 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
10894 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
10895 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
10896 // CHECK11:       omp.inner.for.cond:
10897 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
10898 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !42
10899 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
10900 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10901 // CHECK11:       omp.inner.for.body:
10902 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
10903 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
10904 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10905 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !42
10906 // CHECK11-NEXT:    [[TMP20:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !42
10907 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
10908 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP20]], i32 [[TMP21]]
10909 // CHECK11-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !42
10910 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !42
10911 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
10912 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
10913 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !42
10914 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP22]], [[TMP25]]
10915 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !42
10916 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !42
10917 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
10918 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !42
10919 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
10920 // CHECK11:       omp.body.continue:
10921 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
10922 // CHECK11:       omp.inner.for.inc:
10923 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
10924 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
10925 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42
10926 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
10927 // CHECK11:       omp.inner.for.end:
10928 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
10929 // CHECK11:       omp.loop.exit:
10930 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
10931 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
10932 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
10933 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
10934 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
10935 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10936 // CHECK11:       .omp.final.then:
10937 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
10938 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
10939 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
10940 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
10941 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
10942 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
10943 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
10944 // CHECK11:       .omp.final.done:
10945 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
10946 // CHECK11:       omp.precond.end:
10947 // CHECK11-NEXT:    ret void
10948 //
10949 //
10950 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l505
10951 // CHECK11-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
10952 // CHECK11-NEXT:  entry:
10953 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
10954 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
10955 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
10956 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
10957 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
10958 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
10959 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
10960 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
10961 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
10962 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
10963 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
10964 // CHECK11-NEXT:    ret void
10965 //
10966 //
10967 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..14
10968 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
10969 // CHECK11-NEXT:  entry:
10970 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
10971 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
10972 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
10973 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
10974 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
10975 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
10976 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
10977 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10978 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
10979 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
10980 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10981 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
10982 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
10983 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10984 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10985 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10986 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10987 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
10988 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
10989 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
10990 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
10991 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
10992 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
10993 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
10994 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
10995 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
10996 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
10997 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
10998 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
10999 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
11000 // CHECK11-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
11001 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
11002 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
11003 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
11004 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11005 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11006 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
11007 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11008 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
11009 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
11010 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
11011 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11012 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
11013 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11014 // CHECK11:       omp.precond.then:
11015 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11016 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11017 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
11018 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11019 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11020 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11021 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
11022 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11023 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11024 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11025 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
11026 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11027 // CHECK11:       cond.true:
11028 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11029 // CHECK11-NEXT:    br label [[COND_END:%.*]]
11030 // CHECK11:       cond.false:
11031 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11032 // CHECK11-NEXT:    br label [[COND_END]]
11033 // CHECK11:       cond.end:
11034 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
11035 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11036 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11037 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
11038 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11039 // CHECK11:       omp.inner.for.cond:
11040 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
11041 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !45
11042 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
11043 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11044 // CHECK11:       omp.inner.for.body:
11045 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !45
11046 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !45
11047 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !45
11048 // CHECK11-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !45
11049 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !45
11050 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !45
11051 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11052 // CHECK11:       omp.inner.for.inc:
11053 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
11054 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !45
11055 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
11056 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45
11057 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
11058 // CHECK11:       omp.inner.for.end:
11059 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11060 // CHECK11:       omp.loop.exit:
11061 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11062 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
11063 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
11064 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11065 // CHECK11-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
11066 // CHECK11-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11067 // CHECK11:       .omp.final.then:
11068 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11069 // CHECK11-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
11070 // CHECK11-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
11071 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
11072 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
11073 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
11074 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11075 // CHECK11:       .omp.final.done:
11076 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
11077 // CHECK11:       omp.precond.end:
11078 // CHECK11-NEXT:    ret void
11079 //
11080 //
11081 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..15
11082 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
11083 // CHECK11-NEXT:  entry:
11084 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11085 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11086 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11087 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11088 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
11089 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
11090 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
11091 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
11092 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
11093 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11094 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11095 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11096 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
11097 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
11098 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11099 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11100 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11101 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11102 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
11103 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11104 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11105 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
11106 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11107 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
11108 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
11109 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
11110 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
11111 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
11112 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
11113 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
11114 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
11115 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
11116 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11117 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11118 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11119 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11120 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11121 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
11122 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
11123 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
11124 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11125 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11126 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11127 // CHECK11:       omp.precond.then:
11128 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11129 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11130 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
11131 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
11132 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11133 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
11134 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
11135 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11136 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11137 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
11138 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11139 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
11140 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
11141 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
11142 // CHECK11:       omp.dispatch.cond:
11143 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11144 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11145 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]]
11146 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11147 // CHECK11:       cond.true:
11148 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11149 // CHECK11-NEXT:    br label [[COND_END:%.*]]
11150 // CHECK11:       cond.false:
11151 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11152 // CHECK11-NEXT:    br label [[COND_END]]
11153 // CHECK11:       cond.end:
11154 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
11155 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
11156 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11157 // CHECK11-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
11158 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
11159 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11160 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
11161 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
11162 // CHECK11:       omp.dispatch.body:
11163 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11164 // CHECK11:       omp.inner.for.cond:
11165 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
11166 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !48
11167 // CHECK11-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
11168 // CHECK11-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11169 // CHECK11:       omp.inner.for.body:
11170 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
11171 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
11172 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11173 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !48
11174 // CHECK11-NEXT:    [[TMP23:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !48
11175 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
11176 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP23]], i32 [[TMP24]]
11177 // CHECK11-NEXT:    [[TMP25:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !48
11178 // CHECK11-NEXT:    [[TMP26:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !48
11179 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
11180 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
11181 // CHECK11-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX8]], align 4, !llvm.access.group !48
11182 // CHECK11-NEXT:    [[ADD9:%.*]] = fadd double [[TMP25]], [[TMP28]]
11183 // CHECK11-NEXT:    [[TMP29:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !48
11184 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !48
11185 // CHECK11-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
11186 // CHECK11-NEXT:    store double [[ADD9]], double* [[ARRAYIDX10]], align 4, !llvm.access.group !48
11187 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11188 // CHECK11:       omp.body.continue:
11189 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11190 // CHECK11:       omp.inner.for.inc:
11191 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
11192 // CHECK11-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP31]], 1
11193 // CHECK11-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48
11194 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP49:![0-9]+]]
11195 // CHECK11:       omp.inner.for.end:
11196 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
11197 // CHECK11:       omp.dispatch.inc:
11198 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11199 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11200 // CHECK11-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
11201 // CHECK11-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
11202 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11203 // CHECK11-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
11204 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
11205 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
11206 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
11207 // CHECK11:       omp.dispatch.end:
11208 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11209 // CHECK11-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
11210 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
11211 // CHECK11-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11212 // CHECK11-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
11213 // CHECK11-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11214 // CHECK11:       .omp.final.then:
11215 // CHECK11-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11216 // CHECK11-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP40]], 0
11217 // CHECK11-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
11218 // CHECK11-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
11219 // CHECK11-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
11220 // CHECK11-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
11221 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11222 // CHECK11:       .omp.final.done:
11223 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
11224 // CHECK11:       omp.precond.end:
11225 // CHECK11-NEXT:    ret void
11226 //
11227 //
11228 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l535
11229 // CHECK11-SAME: (i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
11230 // CHECK11-NEXT:  entry:
11231 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
11232 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
11233 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
11234 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
11235 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
11236 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
11237 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
11238 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
11239 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
11240 // CHECK11-NEXT:    ret void
11241 //
11242 //
11243 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..18
11244 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
11245 // CHECK11-NEXT:  entry:
11246 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11247 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11248 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
11249 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
11250 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
11251 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
11252 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11253 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11254 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11255 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11256 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
11257 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11258 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11259 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11260 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11261 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
11262 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11263 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11264 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
11265 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
11266 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
11267 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
11268 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
11269 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
11270 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
11271 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
11272 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11273 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
11274 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11275 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11276 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11277 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11278 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11279 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
11280 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11281 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11282 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11283 // CHECK11:       omp.precond.then:
11284 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11285 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11286 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
11287 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11288 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11289 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11290 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
11291 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11292 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11293 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11294 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
11295 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11296 // CHECK11:       cond.true:
11297 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11298 // CHECK11-NEXT:    br label [[COND_END:%.*]]
11299 // CHECK11:       cond.false:
11300 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11301 // CHECK11-NEXT:    br label [[COND_END]]
11302 // CHECK11:       cond.end:
11303 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
11304 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11305 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11306 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
11307 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11308 // CHECK11:       omp.inner.for.cond:
11309 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
11310 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51
11311 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
11312 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11313 // CHECK11:       omp.inner.for.body:
11314 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !51
11315 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51
11316 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], double** [[TMP1]], double** [[TMP2]], double** [[TMP3]]), !llvm.access.group !51
11317 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11318 // CHECK11:       omp.inner.for.inc:
11319 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
11320 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !51
11321 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
11322 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51
11323 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP52:![0-9]+]]
11324 // CHECK11:       omp.inner.for.end:
11325 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11326 // CHECK11:       omp.loop.exit:
11327 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11328 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
11329 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
11330 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11331 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
11332 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11333 // CHECK11:       .omp.final.then:
11334 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11335 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
11336 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
11337 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
11338 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
11339 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
11340 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11341 // CHECK11:       .omp.final.done:
11342 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
11343 // CHECK11:       omp.precond.end:
11344 // CHECK11-NEXT:    ret void
11345 //
11346 //
11347 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..19
11348 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
11349 // CHECK11-NEXT:  entry:
11350 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11351 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11352 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11353 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11354 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
11355 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
11356 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
11357 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
11358 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11359 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11360 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11361 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11362 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
11363 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11364 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11365 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11366 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11367 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
11368 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11369 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11370 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
11371 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11372 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
11373 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
11374 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
11375 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
11376 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
11377 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
11378 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
11379 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
11380 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11381 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
11382 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11383 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11384 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11385 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11386 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11387 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
11388 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11389 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11390 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11391 // CHECK11:       omp.precond.then:
11392 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11393 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11394 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
11395 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
11396 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11397 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
11398 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
11399 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11400 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11401 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11402 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11403 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11404 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
11405 // CHECK11-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
11406 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
11407 // CHECK11:       omp.dispatch.cond:
11408 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11409 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
11410 // CHECK11-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
11411 // CHECK11-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
11412 // CHECK11-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
11413 // CHECK11:       omp.dispatch.body:
11414 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11415 // CHECK11-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
11416 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11417 // CHECK11:       omp.inner.for.cond:
11418 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
11419 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !54
11420 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
11421 // CHECK11-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11422 // CHECK11:       omp.inner.for.body:
11423 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
11424 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
11425 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11426 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !54
11427 // CHECK11-NEXT:    [[TMP21:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !54
11428 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
11429 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP21]], i32 [[TMP22]]
11430 // CHECK11-NEXT:    [[TMP23:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !54
11431 // CHECK11-NEXT:    [[TMP24:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !54
11432 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
11433 // CHECK11-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP24]], i32 [[TMP25]]
11434 // CHECK11-NEXT:    [[TMP26:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !54
11435 // CHECK11-NEXT:    [[ADD6:%.*]] = fadd double [[TMP23]], [[TMP26]]
11436 // CHECK11-NEXT:    [[TMP27:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !54
11437 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !54
11438 // CHECK11-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP27]], i32 [[TMP28]]
11439 // CHECK11-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !54
11440 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11441 // CHECK11:       omp.body.continue:
11442 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11443 // CHECK11:       omp.inner.for.inc:
11444 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
11445 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP29]], 1
11446 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54
11447 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP55:![0-9]+]]
11448 // CHECK11:       omp.inner.for.end:
11449 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
11450 // CHECK11:       omp.dispatch.inc:
11451 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
11452 // CHECK11:       omp.dispatch.end:
11453 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11454 // CHECK11-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
11455 // CHECK11-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11456 // CHECK11:       .omp.final.then:
11457 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11458 // CHECK11-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP32]], 0
11459 // CHECK11-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
11460 // CHECK11-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
11461 // CHECK11-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
11462 // CHECK11-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
11463 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11464 // CHECK11:       .omp.final.done:
11465 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
11466 // CHECK11:       omp.precond.end:
11467 // CHECK11-NEXT:    ret void
11468 //
11469 //
11470 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l561
11471 // CHECK11-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], double* noundef [[A:%.*]], double* noundef [[B:%.*]], double* noundef [[C:%.*]]) #[[ATTR1]] {
11472 // CHECK11-NEXT:  entry:
11473 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
11474 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
11475 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 4
11476 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double*, align 4
11477 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double*, align 4
11478 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
11479 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
11480 // CHECK11-NEXT:    store double* [[A]], double** [[A_ADDR]], align 4
11481 // CHECK11-NEXT:    store double* [[B]], double** [[B_ADDR]], align 4
11482 // CHECK11-NEXT:    store double* [[C]], double** [[C_ADDR]], align 4
11483 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, double**, double**, double**)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], double** [[A_ADDR]], double** [[B_ADDR]], double** [[C_ADDR]])
11484 // CHECK11-NEXT:    ret void
11485 //
11486 //
11487 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..22
11488 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
11489 // CHECK11-NEXT:  entry:
11490 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11491 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11492 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
11493 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
11494 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
11495 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
11496 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
11497 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11498 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11499 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11500 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11501 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
11502 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
11503 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11504 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11505 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11506 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11507 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
11508 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
11509 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11510 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11511 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
11512 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
11513 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
11514 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
11515 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
11516 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
11517 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
11518 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[A_ADDR]], align 4
11519 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[B_ADDR]], align 4
11520 // CHECK11-NEXT:    [[TMP4:%.*]] = load double**, double*** [[C_ADDR]], align 4
11521 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
11522 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
11523 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
11524 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11525 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11526 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
11527 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11528 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
11529 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
11530 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
11531 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11532 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
11533 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11534 // CHECK11:       omp.precond.then:
11535 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
11536 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11537 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
11538 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11539 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11540 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11541 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
11542 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
11543 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11544 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11545 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
11546 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11547 // CHECK11:       cond.true:
11548 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11549 // CHECK11-NEXT:    br label [[COND_END:%.*]]
11550 // CHECK11:       cond.false:
11551 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
11552 // CHECK11-NEXT:    br label [[COND_END]]
11553 // CHECK11:       cond.end:
11554 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
11555 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
11556 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
11557 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
11558 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11559 // CHECK11:       omp.inner.for.cond:
11560 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
11561 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !57
11562 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
11563 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11564 // CHECK11:       omp.inner.for.body:
11565 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !57
11566 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !57
11567 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !57
11568 // CHECK11-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !57
11569 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !57
11570 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, double**, double**, double**, i32)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], double** [[TMP2]], double** [[TMP3]], double** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !57
11571 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11572 // CHECK11:       omp.inner.for.inc:
11573 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
11574 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !57
11575 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
11576 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57
11577 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP58:![0-9]+]]
11578 // CHECK11:       omp.inner.for.end:
11579 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
11580 // CHECK11:       omp.loop.exit:
11581 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11582 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
11583 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
11584 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11585 // CHECK11-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
11586 // CHECK11-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11587 // CHECK11:       .omp.final.then:
11588 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11589 // CHECK11-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
11590 // CHECK11-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
11591 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
11592 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
11593 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
11594 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11595 // CHECK11:       .omp.final.done:
11596 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
11597 // CHECK11:       omp.precond.end:
11598 // CHECK11-NEXT:    ret void
11599 //
11600 //
11601 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..23
11602 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], double** noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
11603 // CHECK11-NEXT:  entry:
11604 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
11605 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
11606 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11607 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11608 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
11609 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca double**, align 4
11610 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca double**, align 4
11611 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca double**, align 4
11612 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
11613 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
11614 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11615 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11616 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
11617 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
11618 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
11619 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
11620 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11621 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11622 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
11623 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
11624 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
11625 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
11626 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11627 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
11628 // CHECK11-NEXT:    store double** [[A]], double*** [[A_ADDR]], align 4
11629 // CHECK11-NEXT:    store double** [[B]], double*** [[B_ADDR]], align 4
11630 // CHECK11-NEXT:    store double** [[C]], double*** [[C_ADDR]], align 4
11631 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
11632 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
11633 // CHECK11-NEXT:    [[TMP1:%.*]] = load double**, double*** [[A_ADDR]], align 4
11634 // CHECK11-NEXT:    [[TMP2:%.*]] = load double**, double*** [[B_ADDR]], align 4
11635 // CHECK11-NEXT:    [[TMP3:%.*]] = load double**, double*** [[C_ADDR]], align 4
11636 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
11637 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11638 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11639 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
11640 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11641 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
11642 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
11643 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
11644 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11645 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
11646 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11647 // CHECK11:       omp.precond.then:
11648 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
11649 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
11650 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
11651 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
11652 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
11653 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
11654 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
11655 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
11656 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
11657 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
11658 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11659 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
11660 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11661 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
11662 // CHECK11-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
11663 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
11664 // CHECK11:       omp.dispatch.cond:
11665 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
11666 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
11667 // CHECK11-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
11668 // CHECK11-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
11669 // CHECK11-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
11670 // CHECK11:       omp.dispatch.body:
11671 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
11672 // CHECK11-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
11673 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
11674 // CHECK11:       omp.inner.for.cond:
11675 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
11676 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !60
11677 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
11678 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11679 // CHECK11:       omp.inner.for.body:
11680 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
11681 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
11682 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11683 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !60
11684 // CHECK11-NEXT:    [[TMP22:%.*]] = load double*, double** [[TMP2]], align 4, !llvm.access.group !60
11685 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
11686 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP22]], i32 [[TMP23]]
11687 // CHECK11-NEXT:    [[TMP24:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !60
11688 // CHECK11-NEXT:    [[TMP25:%.*]] = load double*, double** [[TMP3]], align 4, !llvm.access.group !60
11689 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
11690 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP25]], i32 [[TMP26]]
11691 // CHECK11-NEXT:    [[TMP27:%.*]] = load double, double* [[ARRAYIDX6]], align 4, !llvm.access.group !60
11692 // CHECK11-NEXT:    [[ADD7:%.*]] = fadd double [[TMP24]], [[TMP27]]
11693 // CHECK11-NEXT:    [[TMP28:%.*]] = load double*, double** [[TMP1]], align 4, !llvm.access.group !60
11694 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !60
11695 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[TMP28]], i32 [[TMP29]]
11696 // CHECK11-NEXT:    store double [[ADD7]], double* [[ARRAYIDX8]], align 4, !llvm.access.group !60
11697 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
11698 // CHECK11:       omp.body.continue:
11699 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
11700 // CHECK11:       omp.inner.for.inc:
11701 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
11702 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP30]], 1
11703 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60
11704 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP61:![0-9]+]]
11705 // CHECK11:       omp.inner.for.end:
11706 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
11707 // CHECK11:       omp.dispatch.inc:
11708 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
11709 // CHECK11:       omp.dispatch.end:
11710 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
11711 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
11712 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11713 // CHECK11:       .omp.final.then:
11714 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11715 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
11716 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
11717 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
11718 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
11719 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
11720 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
11721 // CHECK11:       .omp.final.done:
11722 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
11723 // CHECK11:       omp.precond.end:
11724 // CHECK11-NEXT:    ret void
11725 //
11726 //
11727 // CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
11728 // CHECK11-SAME: () #[[ATTR3:[0-9]+]] comdat {
11729 // CHECK11-NEXT:  entry:
11730 // CHECK11-NEXT:    [[A:%.*]] = alloca i32*, align 4
11731 // CHECK11-NEXT:    [[B:%.*]] = alloca i32*, align 4
11732 // CHECK11-NEXT:    [[C:%.*]] = alloca i32*, align 4
11733 // CHECK11-NEXT:    [[N:%.*]] = alloca i32, align 4
11734 // CHECK11-NEXT:    [[CH:%.*]] = alloca i32, align 4
11735 // CHECK11-NEXT:    [[N_CASTED:%.*]] = alloca i32, align 4
11736 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
11737 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
11738 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
11739 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
11740 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11741 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11742 // CHECK11-NEXT:    [[N_CASTED3:%.*]] = alloca i32, align 4
11743 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [4 x i8*], align 4
11744 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS5:%.*]] = alloca [4 x i8*], align 4
11745 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [4 x i8*], align 4
11746 // CHECK11-NEXT:    [[_TMP7:%.*]] = alloca i32, align 4
11747 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4
11748 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
11749 // CHECK11-NEXT:    [[CH_CASTED:%.*]] = alloca i32, align 4
11750 // CHECK11-NEXT:    [[N_CASTED16:%.*]] = alloca i32, align 4
11751 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [5 x i8*], align 4
11752 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS18:%.*]] = alloca [5 x i8*], align 4
11753 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [5 x i8*], align 4
11754 // CHECK11-NEXT:    [[_TMP20:%.*]] = alloca i32, align 4
11755 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
11756 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4
11757 // CHECK11-NEXT:    [[N_CASTED29:%.*]] = alloca i32, align 4
11758 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [4 x i8*], align 4
11759 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS31:%.*]] = alloca [4 x i8*], align 4
11760 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [4 x i8*], align 4
11761 // CHECK11-NEXT:    [[_TMP33:%.*]] = alloca i32, align 4
11762 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4
11763 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4
11764 // CHECK11-NEXT:    [[CH_CASTED42:%.*]] = alloca i32, align 4
11765 // CHECK11-NEXT:    [[N_CASTED43:%.*]] = alloca i32, align 4
11766 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [5 x i8*], align 4
11767 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS45:%.*]] = alloca [5 x i8*], align 4
11768 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [5 x i8*], align 4
11769 // CHECK11-NEXT:    [[_TMP47:%.*]] = alloca i32, align 4
11770 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4
11771 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4
11772 // CHECK11-NEXT:    [[N_CASTED56:%.*]] = alloca i32, align 4
11773 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS57:%.*]] = alloca [4 x i8*], align 4
11774 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS58:%.*]] = alloca [4 x i8*], align 4
11775 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS59:%.*]] = alloca [4 x i8*], align 4
11776 // CHECK11-NEXT:    [[_TMP60:%.*]] = alloca i32, align 4
11777 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
11778 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_62:%.*]] = alloca i32, align 4
11779 // CHECK11-NEXT:    [[CH_CASTED69:%.*]] = alloca i32, align 4
11780 // CHECK11-NEXT:    [[N_CASTED70:%.*]] = alloca i32, align 4
11781 // CHECK11-NEXT:    [[DOTOFFLOAD_BASEPTRS71:%.*]] = alloca [5 x i8*], align 4
11782 // CHECK11-NEXT:    [[DOTOFFLOAD_PTRS72:%.*]] = alloca [5 x i8*], align 4
11783 // CHECK11-NEXT:    [[DOTOFFLOAD_MAPPERS73:%.*]] = alloca [5 x i8*], align 4
11784 // CHECK11-NEXT:    [[_TMP74:%.*]] = alloca i32, align 4
11785 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_75:%.*]] = alloca i32, align 4
11786 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
11787 // CHECK11-NEXT:    store i32 10000, i32* [[N]], align 4
11788 // CHECK11-NEXT:    store i32 100, i32* [[CH]], align 4
11789 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
11790 // CHECK11-NEXT:    store i32 [[TMP0]], i32* [[N_CASTED]], align 4
11791 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_CASTED]], align 4
11792 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A]], align 4
11793 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[B]], align 4
11794 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[C]], align 4
11795 // CHECK11-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
11796 // CHECK11-NEXT:    [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
11797 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP6]], align 4
11798 // CHECK11-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
11799 // CHECK11-NEXT:    [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
11800 // CHECK11-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
11801 // CHECK11-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
11802 // CHECK11-NEXT:    store i8* null, i8** [[TMP9]], align 4
11803 // CHECK11-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
11804 // CHECK11-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
11805 // CHECK11-NEXT:    store i32* [[TMP2]], i32** [[TMP11]], align 4
11806 // CHECK11-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
11807 // CHECK11-NEXT:    [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32**
11808 // CHECK11-NEXT:    store i32* [[TMP2]], i32** [[TMP13]], align 4
11809 // CHECK11-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
11810 // CHECK11-NEXT:    store i8* null, i8** [[TMP14]], align 4
11811 // CHECK11-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
11812 // CHECK11-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32**
11813 // CHECK11-NEXT:    store i32* [[TMP3]], i32** [[TMP16]], align 4
11814 // CHECK11-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
11815 // CHECK11-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
11816 // CHECK11-NEXT:    store i32* [[TMP3]], i32** [[TMP18]], align 4
11817 // CHECK11-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
11818 // CHECK11-NEXT:    store i8* null, i8** [[TMP19]], align 4
11819 // CHECK11-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
11820 // CHECK11-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
11821 // CHECK11-NEXT:    store i32* [[TMP4]], i32** [[TMP21]], align 4
11822 // CHECK11-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
11823 // CHECK11-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
11824 // CHECK11-NEXT:    store i32* [[TMP4]], i32** [[TMP23]], align 4
11825 // CHECK11-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
11826 // CHECK11-NEXT:    store i8* null, i8** [[TMP24]], align 4
11827 // CHECK11-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
11828 // CHECK11-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
11829 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[N]], align 4
11830 // CHECK11-NEXT:    store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
11831 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
11832 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
11833 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11834 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11835 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
11836 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
11837 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
11838 // CHECK11-NEXT:    [[TMP30:%.*]] = zext i32 [[ADD]] to i64
11839 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP30]])
11840 // CHECK11-NEXT:    [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42.region_id, i32 4, i8** [[TMP25]], i8** [[TMP26]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11841 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
11842 // CHECK11-NEXT:    br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
11843 // CHECK11:       omp_offload.failed:
11844 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42(i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]], i32* [[TMP4]]) #[[ATTR2]]
11845 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT]]
11846 // CHECK11:       omp_offload.cont:
11847 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[N]], align 4
11848 // CHECK11-NEXT:    store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
11849 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
11850 // CHECK11-NEXT:    [[TMP35:%.*]] = load i32*, i32** [[A]], align 4
11851 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[B]], align 4
11852 // CHECK11-NEXT:    [[TMP37:%.*]] = load i32*, i32** [[C]], align 4
11853 // CHECK11-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
11854 // CHECK11-NEXT:    [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32*
11855 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[TMP39]], align 4
11856 // CHECK11-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
11857 // CHECK11-NEXT:    [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
11858 // CHECK11-NEXT:    store i32 [[TMP34]], i32* [[TMP41]], align 4
11859 // CHECK11-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
11860 // CHECK11-NEXT:    store i8* null, i8** [[TMP42]], align 4
11861 // CHECK11-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
11862 // CHECK11-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32**
11863 // CHECK11-NEXT:    store i32* [[TMP35]], i32** [[TMP44]], align 4
11864 // CHECK11-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
11865 // CHECK11-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
11866 // CHECK11-NEXT:    store i32* [[TMP35]], i32** [[TMP46]], align 4
11867 // CHECK11-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
11868 // CHECK11-NEXT:    store i8* null, i8** [[TMP47]], align 4
11869 // CHECK11-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
11870 // CHECK11-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
11871 // CHECK11-NEXT:    store i32* [[TMP36]], i32** [[TMP49]], align 4
11872 // CHECK11-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
11873 // CHECK11-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
11874 // CHECK11-NEXT:    store i32* [[TMP36]], i32** [[TMP51]], align 4
11875 // CHECK11-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
11876 // CHECK11-NEXT:    store i8* null, i8** [[TMP52]], align 4
11877 // CHECK11-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 3
11878 // CHECK11-NEXT:    [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32**
11879 // CHECK11-NEXT:    store i32* [[TMP37]], i32** [[TMP54]], align 4
11880 // CHECK11-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 3
11881 // CHECK11-NEXT:    [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32**
11882 // CHECK11-NEXT:    store i32* [[TMP37]], i32** [[TMP56]], align 4
11883 // CHECK11-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 3
11884 // CHECK11-NEXT:    store i8* null, i8** [[TMP57]], align 4
11885 // CHECK11-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
11886 // CHECK11-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
11887 // CHECK11-NEXT:    [[TMP60:%.*]] = load i32, i32* [[N]], align 4
11888 // CHECK11-NEXT:    store i32 [[TMP60]], i32* [[DOTCAPTURE_EXPR_8]], align 4
11889 // CHECK11-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4
11890 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP61]], 0
11891 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
11892 // CHECK11-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1
11893 // CHECK11-NEXT:    store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4
11894 // CHECK11-NEXT:    [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
11895 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP62]], 1
11896 // CHECK11-NEXT:    [[TMP63:%.*]] = zext i32 [[ADD13]] to i64
11897 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP63]])
11898 // CHECK11-NEXT:    [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50.region_id, i32 4, i8** [[TMP58]], i8** [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11899 // CHECK11-NEXT:    [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
11900 // CHECK11-NEXT:    br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
11901 // CHECK11:       omp_offload.failed14:
11902 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50(i32 [[TMP34]], i32* [[TMP35]], i32* [[TMP36]], i32* [[TMP37]]) #[[ATTR2]]
11903 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT15]]
11904 // CHECK11:       omp_offload.cont15:
11905 // CHECK11-NEXT:    [[TMP66:%.*]] = load i32, i32* [[CH]], align 4
11906 // CHECK11-NEXT:    store i32 [[TMP66]], i32* [[CH_CASTED]], align 4
11907 // CHECK11-NEXT:    [[TMP67:%.*]] = load i32, i32* [[CH_CASTED]], align 4
11908 // CHECK11-NEXT:    [[TMP68:%.*]] = load i32, i32* [[N]], align 4
11909 // CHECK11-NEXT:    store i32 [[TMP68]], i32* [[N_CASTED16]], align 4
11910 // CHECK11-NEXT:    [[TMP69:%.*]] = load i32, i32* [[N_CASTED16]], align 4
11911 // CHECK11-NEXT:    [[TMP70:%.*]] = load i32*, i32** [[A]], align 4
11912 // CHECK11-NEXT:    [[TMP71:%.*]] = load i32*, i32** [[B]], align 4
11913 // CHECK11-NEXT:    [[TMP72:%.*]] = load i32*, i32** [[C]], align 4
11914 // CHECK11-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
11915 // CHECK11-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
11916 // CHECK11-NEXT:    store i32 [[TMP67]], i32* [[TMP74]], align 4
11917 // CHECK11-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
11918 // CHECK11-NEXT:    [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32*
11919 // CHECK11-NEXT:    store i32 [[TMP67]], i32* [[TMP76]], align 4
11920 // CHECK11-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
11921 // CHECK11-NEXT:    store i8* null, i8** [[TMP77]], align 4
11922 // CHECK11-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1
11923 // CHECK11-NEXT:    [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32*
11924 // CHECK11-NEXT:    store i32 [[TMP69]], i32* [[TMP79]], align 4
11925 // CHECK11-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1
11926 // CHECK11-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32*
11927 // CHECK11-NEXT:    store i32 [[TMP69]], i32* [[TMP81]], align 4
11928 // CHECK11-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1
11929 // CHECK11-NEXT:    store i8* null, i8** [[TMP82]], align 4
11930 // CHECK11-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2
11931 // CHECK11-NEXT:    [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
11932 // CHECK11-NEXT:    store i32* [[TMP70]], i32** [[TMP84]], align 4
11933 // CHECK11-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2
11934 // CHECK11-NEXT:    [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
11935 // CHECK11-NEXT:    store i32* [[TMP70]], i32** [[TMP86]], align 4
11936 // CHECK11-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2
11937 // CHECK11-NEXT:    store i8* null, i8** [[TMP87]], align 4
11938 // CHECK11-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3
11939 // CHECK11-NEXT:    [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32**
11940 // CHECK11-NEXT:    store i32* [[TMP71]], i32** [[TMP89]], align 4
11941 // CHECK11-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3
11942 // CHECK11-NEXT:    [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32**
11943 // CHECK11-NEXT:    store i32* [[TMP71]], i32** [[TMP91]], align 4
11944 // CHECK11-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3
11945 // CHECK11-NEXT:    store i8* null, i8** [[TMP92]], align 4
11946 // CHECK11-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 4
11947 // CHECK11-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32**
11948 // CHECK11-NEXT:    store i32* [[TMP72]], i32** [[TMP94]], align 4
11949 // CHECK11-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 4
11950 // CHECK11-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32**
11951 // CHECK11-NEXT:    store i32* [[TMP72]], i32** [[TMP96]], align 4
11952 // CHECK11-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 4
11953 // CHECK11-NEXT:    store i8* null, i8** [[TMP97]], align 4
11954 // CHECK11-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
11955 // CHECK11-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
11956 // CHECK11-NEXT:    [[TMP100:%.*]] = load i32, i32* [[N]], align 4
11957 // CHECK11-NEXT:    store i32 [[TMP100]], i32* [[DOTCAPTURE_EXPR_21]], align 4
11958 // CHECK11-NEXT:    [[TMP101:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4
11959 // CHECK11-NEXT:    [[SUB23:%.*]] = sub nsw i32 [[TMP101]], 0
11960 // CHECK11-NEXT:    [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1
11961 // CHECK11-NEXT:    [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1
11962 // CHECK11-NEXT:    store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4
11963 // CHECK11-NEXT:    [[TMP102:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4
11964 // CHECK11-NEXT:    [[ADD26:%.*]] = add nsw i32 [[TMP102]], 1
11965 // CHECK11-NEXT:    [[TMP103:%.*]] = zext i32 [[ADD26]] to i64
11966 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP103]])
11967 // CHECK11-NEXT:    [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58.region_id, i32 5, i8** [[TMP98]], i8** [[TMP99]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
11968 // CHECK11-NEXT:    [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0
11969 // CHECK11-NEXT:    br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]]
11970 // CHECK11:       omp_offload.failed27:
11971 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58(i32 [[TMP67]], i32 [[TMP69]], i32* [[TMP70]], i32* [[TMP71]], i32* [[TMP72]]) #[[ATTR2]]
11972 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT28]]
11973 // CHECK11:       omp_offload.cont28:
11974 // CHECK11-NEXT:    [[TMP106:%.*]] = load i32, i32* [[N]], align 4
11975 // CHECK11-NEXT:    store i32 [[TMP106]], i32* [[N_CASTED29]], align 4
11976 // CHECK11-NEXT:    [[TMP107:%.*]] = load i32, i32* [[N_CASTED29]], align 4
11977 // CHECK11-NEXT:    [[TMP108:%.*]] = load i32*, i32** [[A]], align 4
11978 // CHECK11-NEXT:    [[TMP109:%.*]] = load i32*, i32** [[B]], align 4
11979 // CHECK11-NEXT:    [[TMP110:%.*]] = load i32*, i32** [[C]], align 4
11980 // CHECK11-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
11981 // CHECK11-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32*
11982 // CHECK11-NEXT:    store i32 [[TMP107]], i32* [[TMP112]], align 4
11983 // CHECK11-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
11984 // CHECK11-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32*
11985 // CHECK11-NEXT:    store i32 [[TMP107]], i32* [[TMP114]], align 4
11986 // CHECK11-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0
11987 // CHECK11-NEXT:    store i8* null, i8** [[TMP115]], align 4
11988 // CHECK11-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1
11989 // CHECK11-NEXT:    [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32**
11990 // CHECK11-NEXT:    store i32* [[TMP108]], i32** [[TMP117]], align 4
11991 // CHECK11-NEXT:    [[TMP118:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1
11992 // CHECK11-NEXT:    [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32**
11993 // CHECK11-NEXT:    store i32* [[TMP108]], i32** [[TMP119]], align 4
11994 // CHECK11-NEXT:    [[TMP120:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1
11995 // CHECK11-NEXT:    store i8* null, i8** [[TMP120]], align 4
11996 // CHECK11-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2
11997 // CHECK11-NEXT:    [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32**
11998 // CHECK11-NEXT:    store i32* [[TMP109]], i32** [[TMP122]], align 4
11999 // CHECK11-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2
12000 // CHECK11-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32**
12001 // CHECK11-NEXT:    store i32* [[TMP109]], i32** [[TMP124]], align 4
12002 // CHECK11-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2
12003 // CHECK11-NEXT:    store i8* null, i8** [[TMP125]], align 4
12004 // CHECK11-NEXT:    [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 3
12005 // CHECK11-NEXT:    [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i32**
12006 // CHECK11-NEXT:    store i32* [[TMP110]], i32** [[TMP127]], align 4
12007 // CHECK11-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 3
12008 // CHECK11-NEXT:    [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32**
12009 // CHECK11-NEXT:    store i32* [[TMP110]], i32** [[TMP129]], align 4
12010 // CHECK11-NEXT:    [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 3
12011 // CHECK11-NEXT:    store i8* null, i8** [[TMP130]], align 4
12012 // CHECK11-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0
12013 // CHECK11-NEXT:    [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0
12014 // CHECK11-NEXT:    [[TMP133:%.*]] = load i32, i32* [[N]], align 4
12015 // CHECK11-NEXT:    store i32 [[TMP133]], i32* [[DOTCAPTURE_EXPR_34]], align 4
12016 // CHECK11-NEXT:    [[TMP134:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4
12017 // CHECK11-NEXT:    [[SUB36:%.*]] = sub nsw i32 [[TMP134]], 0
12018 // CHECK11-NEXT:    [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1
12019 // CHECK11-NEXT:    [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1
12020 // CHECK11-NEXT:    store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4
12021 // CHECK11-NEXT:    [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4
12022 // CHECK11-NEXT:    [[ADD39:%.*]] = add nsw i32 [[TMP135]], 1
12023 // CHECK11-NEXT:    [[TMP136:%.*]] = zext i32 [[ADD39]] to i64
12024 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP136]])
12025 // CHECK11-NEXT:    [[TMP137:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.region_id, i32 4, i8** [[TMP131]], i8** [[TMP132]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.40, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.41, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
12026 // CHECK11-NEXT:    [[TMP138:%.*]] = icmp ne i32 [[TMP137]], 0
12027 // CHECK11-NEXT:    br i1 [[TMP138]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]]
12028 // CHECK11:       omp_offload.failed40:
12029 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66(i32 [[TMP107]], i32* [[TMP108]], i32* [[TMP109]], i32* [[TMP110]]) #[[ATTR2]]
12030 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT41]]
12031 // CHECK11:       omp_offload.cont41:
12032 // CHECK11-NEXT:    [[TMP139:%.*]] = load i32, i32* [[CH]], align 4
12033 // CHECK11-NEXT:    store i32 [[TMP139]], i32* [[CH_CASTED42]], align 4
12034 // CHECK11-NEXT:    [[TMP140:%.*]] = load i32, i32* [[CH_CASTED42]], align 4
12035 // CHECK11-NEXT:    [[TMP141:%.*]] = load i32, i32* [[N]], align 4
12036 // CHECK11-NEXT:    store i32 [[TMP141]], i32* [[N_CASTED43]], align 4
12037 // CHECK11-NEXT:    [[TMP142:%.*]] = load i32, i32* [[N_CASTED43]], align 4
12038 // CHECK11-NEXT:    [[TMP143:%.*]] = load i32*, i32** [[A]], align 4
12039 // CHECK11-NEXT:    [[TMP144:%.*]] = load i32*, i32** [[B]], align 4
12040 // CHECK11-NEXT:    [[TMP145:%.*]] = load i32*, i32** [[C]], align 4
12041 // CHECK11-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
12042 // CHECK11-NEXT:    [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32*
12043 // CHECK11-NEXT:    store i32 [[TMP140]], i32* [[TMP147]], align 4
12044 // CHECK11-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
12045 // CHECK11-NEXT:    [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
12046 // CHECK11-NEXT:    store i32 [[TMP140]], i32* [[TMP149]], align 4
12047 // CHECK11-NEXT:    [[TMP150:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0
12048 // CHECK11-NEXT:    store i8* null, i8** [[TMP150]], align 4
12049 // CHECK11-NEXT:    [[TMP151:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1
12050 // CHECK11-NEXT:    [[TMP152:%.*]] = bitcast i8** [[TMP151]] to i32*
12051 // CHECK11-NEXT:    store i32 [[TMP142]], i32* [[TMP152]], align 4
12052 // CHECK11-NEXT:    [[TMP153:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1
12053 // CHECK11-NEXT:    [[TMP154:%.*]] = bitcast i8** [[TMP153]] to i32*
12054 // CHECK11-NEXT:    store i32 [[TMP142]], i32* [[TMP154]], align 4
12055 // CHECK11-NEXT:    [[TMP155:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1
12056 // CHECK11-NEXT:    store i8* null, i8** [[TMP155]], align 4
12057 // CHECK11-NEXT:    [[TMP156:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2
12058 // CHECK11-NEXT:    [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
12059 // CHECK11-NEXT:    store i32* [[TMP143]], i32** [[TMP157]], align 4
12060 // CHECK11-NEXT:    [[TMP158:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2
12061 // CHECK11-NEXT:    [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32**
12062 // CHECK11-NEXT:    store i32* [[TMP143]], i32** [[TMP159]], align 4
12063 // CHECK11-NEXT:    [[TMP160:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2
12064 // CHECK11-NEXT:    store i8* null, i8** [[TMP160]], align 4
12065 // CHECK11-NEXT:    [[TMP161:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3
12066 // CHECK11-NEXT:    [[TMP162:%.*]] = bitcast i8** [[TMP161]] to i32**
12067 // CHECK11-NEXT:    store i32* [[TMP144]], i32** [[TMP162]], align 4
12068 // CHECK11-NEXT:    [[TMP163:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3
12069 // CHECK11-NEXT:    [[TMP164:%.*]] = bitcast i8** [[TMP163]] to i32**
12070 // CHECK11-NEXT:    store i32* [[TMP144]], i32** [[TMP164]], align 4
12071 // CHECK11-NEXT:    [[TMP165:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3
12072 // CHECK11-NEXT:    store i8* null, i8** [[TMP165]], align 4
12073 // CHECK11-NEXT:    [[TMP166:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 4
12074 // CHECK11-NEXT:    [[TMP167:%.*]] = bitcast i8** [[TMP166]] to i32**
12075 // CHECK11-NEXT:    store i32* [[TMP145]], i32** [[TMP167]], align 4
12076 // CHECK11-NEXT:    [[TMP168:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 4
12077 // CHECK11-NEXT:    [[TMP169:%.*]] = bitcast i8** [[TMP168]] to i32**
12078 // CHECK11-NEXT:    store i32* [[TMP145]], i32** [[TMP169]], align 4
12079 // CHECK11-NEXT:    [[TMP170:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 4
12080 // CHECK11-NEXT:    store i8* null, i8** [[TMP170]], align 4
12081 // CHECK11-NEXT:    [[TMP171:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0
12082 // CHECK11-NEXT:    [[TMP172:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0
12083 // CHECK11-NEXT:    [[TMP173:%.*]] = load i32, i32* [[N]], align 4
12084 // CHECK11-NEXT:    store i32 [[TMP173]], i32* [[DOTCAPTURE_EXPR_48]], align 4
12085 // CHECK11-NEXT:    [[TMP174:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4
12086 // CHECK11-NEXT:    [[SUB50:%.*]] = sub nsw i32 [[TMP174]], 0
12087 // CHECK11-NEXT:    [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1
12088 // CHECK11-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1
12089 // CHECK11-NEXT:    store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4
12090 // CHECK11-NEXT:    [[TMP175:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4
12091 // CHECK11-NEXT:    [[ADD53:%.*]] = add nsw i32 [[TMP175]], 1
12092 // CHECK11-NEXT:    [[TMP176:%.*]] = zext i32 [[ADD53]] to i64
12093 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP176]])
12094 // CHECK11-NEXT:    [[TMP177:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74.region_id, i32 5, i8** [[TMP171]], i8** [[TMP172]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.44, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.45, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
12095 // CHECK11-NEXT:    [[TMP178:%.*]] = icmp ne i32 [[TMP177]], 0
12096 // CHECK11-NEXT:    br i1 [[TMP178]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]]
12097 // CHECK11:       omp_offload.failed54:
12098 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74(i32 [[TMP140]], i32 [[TMP142]], i32* [[TMP143]], i32* [[TMP144]], i32* [[TMP145]]) #[[ATTR2]]
12099 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT55]]
12100 // CHECK11:       omp_offload.cont55:
12101 // CHECK11-NEXT:    [[TMP179:%.*]] = load i32, i32* [[N]], align 4
12102 // CHECK11-NEXT:    store i32 [[TMP179]], i32* [[N_CASTED56]], align 4
12103 // CHECK11-NEXT:    [[TMP180:%.*]] = load i32, i32* [[N_CASTED56]], align 4
12104 // CHECK11-NEXT:    [[TMP181:%.*]] = load i32*, i32** [[A]], align 4
12105 // CHECK11-NEXT:    [[TMP182:%.*]] = load i32*, i32** [[B]], align 4
12106 // CHECK11-NEXT:    [[TMP183:%.*]] = load i32*, i32** [[C]], align 4
12107 // CHECK11-NEXT:    [[TMP184:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
12108 // CHECK11-NEXT:    [[TMP185:%.*]] = bitcast i8** [[TMP184]] to i32*
12109 // CHECK11-NEXT:    store i32 [[TMP180]], i32* [[TMP185]], align 4
12110 // CHECK11-NEXT:    [[TMP186:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
12111 // CHECK11-NEXT:    [[TMP187:%.*]] = bitcast i8** [[TMP186]] to i32*
12112 // CHECK11-NEXT:    store i32 [[TMP180]], i32* [[TMP187]], align 4
12113 // CHECK11-NEXT:    [[TMP188:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 0
12114 // CHECK11-NEXT:    store i8* null, i8** [[TMP188]], align 4
12115 // CHECK11-NEXT:    [[TMP189:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 1
12116 // CHECK11-NEXT:    [[TMP190:%.*]] = bitcast i8** [[TMP189]] to i32**
12117 // CHECK11-NEXT:    store i32* [[TMP181]], i32** [[TMP190]], align 4
12118 // CHECK11-NEXT:    [[TMP191:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 1
12119 // CHECK11-NEXT:    [[TMP192:%.*]] = bitcast i8** [[TMP191]] to i32**
12120 // CHECK11-NEXT:    store i32* [[TMP181]], i32** [[TMP192]], align 4
12121 // CHECK11-NEXT:    [[TMP193:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 1
12122 // CHECK11-NEXT:    store i8* null, i8** [[TMP193]], align 4
12123 // CHECK11-NEXT:    [[TMP194:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 2
12124 // CHECK11-NEXT:    [[TMP195:%.*]] = bitcast i8** [[TMP194]] to i32**
12125 // CHECK11-NEXT:    store i32* [[TMP182]], i32** [[TMP195]], align 4
12126 // CHECK11-NEXT:    [[TMP196:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 2
12127 // CHECK11-NEXT:    [[TMP197:%.*]] = bitcast i8** [[TMP196]] to i32**
12128 // CHECK11-NEXT:    store i32* [[TMP182]], i32** [[TMP197]], align 4
12129 // CHECK11-NEXT:    [[TMP198:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 2
12130 // CHECK11-NEXT:    store i8* null, i8** [[TMP198]], align 4
12131 // CHECK11-NEXT:    [[TMP199:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 3
12132 // CHECK11-NEXT:    [[TMP200:%.*]] = bitcast i8** [[TMP199]] to i32**
12133 // CHECK11-NEXT:    store i32* [[TMP183]], i32** [[TMP200]], align 4
12134 // CHECK11-NEXT:    [[TMP201:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 3
12135 // CHECK11-NEXT:    [[TMP202:%.*]] = bitcast i8** [[TMP201]] to i32**
12136 // CHECK11-NEXT:    store i32* [[TMP183]], i32** [[TMP202]], align 4
12137 // CHECK11-NEXT:    [[TMP203:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 3
12138 // CHECK11-NEXT:    store i8* null, i8** [[TMP203]], align 4
12139 // CHECK11-NEXT:    [[TMP204:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
12140 // CHECK11-NEXT:    [[TMP205:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
12141 // CHECK11-NEXT:    [[TMP206:%.*]] = load i32, i32* [[N]], align 4
12142 // CHECK11-NEXT:    store i32 [[TMP206]], i32* [[DOTCAPTURE_EXPR_61]], align 4
12143 // CHECK11-NEXT:    [[TMP207:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
12144 // CHECK11-NEXT:    [[SUB63:%.*]] = sub nsw i32 [[TMP207]], 0
12145 // CHECK11-NEXT:    [[DIV64:%.*]] = sdiv i32 [[SUB63]], 1
12146 // CHECK11-NEXT:    [[SUB65:%.*]] = sub nsw i32 [[DIV64]], 1
12147 // CHECK11-NEXT:    store i32 [[SUB65]], i32* [[DOTCAPTURE_EXPR_62]], align 4
12148 // CHECK11-NEXT:    [[TMP208:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_62]], align 4
12149 // CHECK11-NEXT:    [[ADD66:%.*]] = add nsw i32 [[TMP208]], 1
12150 // CHECK11-NEXT:    [[TMP209:%.*]] = zext i32 [[ADD66]] to i64
12151 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP209]])
12152 // CHECK11-NEXT:    [[TMP210:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82.region_id, i32 4, i8** [[TMP204]], i8** [[TMP205]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.48, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.49, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
12153 // CHECK11-NEXT:    [[TMP211:%.*]] = icmp ne i32 [[TMP210]], 0
12154 // CHECK11-NEXT:    br i1 [[TMP211]], label [[OMP_OFFLOAD_FAILED67:%.*]], label [[OMP_OFFLOAD_CONT68:%.*]]
12155 // CHECK11:       omp_offload.failed67:
12156 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82(i32 [[TMP180]], i32* [[TMP181]], i32* [[TMP182]], i32* [[TMP183]]) #[[ATTR2]]
12157 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT68]]
12158 // CHECK11:       omp_offload.cont68:
12159 // CHECK11-NEXT:    [[TMP212:%.*]] = load i32, i32* [[CH]], align 4
12160 // CHECK11-NEXT:    store i32 [[TMP212]], i32* [[CH_CASTED69]], align 4
12161 // CHECK11-NEXT:    [[TMP213:%.*]] = load i32, i32* [[CH_CASTED69]], align 4
12162 // CHECK11-NEXT:    [[TMP214:%.*]] = load i32, i32* [[N]], align 4
12163 // CHECK11-NEXT:    store i32 [[TMP214]], i32* [[N_CASTED70]], align 4
12164 // CHECK11-NEXT:    [[TMP215:%.*]] = load i32, i32* [[N_CASTED70]], align 4
12165 // CHECK11-NEXT:    [[TMP216:%.*]] = load i32*, i32** [[A]], align 4
12166 // CHECK11-NEXT:    [[TMP217:%.*]] = load i32*, i32** [[B]], align 4
12167 // CHECK11-NEXT:    [[TMP218:%.*]] = load i32*, i32** [[C]], align 4
12168 // CHECK11-NEXT:    [[TMP219:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
12169 // CHECK11-NEXT:    [[TMP220:%.*]] = bitcast i8** [[TMP219]] to i32*
12170 // CHECK11-NEXT:    store i32 [[TMP213]], i32* [[TMP220]], align 4
12171 // CHECK11-NEXT:    [[TMP221:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
12172 // CHECK11-NEXT:    [[TMP222:%.*]] = bitcast i8** [[TMP221]] to i32*
12173 // CHECK11-NEXT:    store i32 [[TMP213]], i32* [[TMP222]], align 4
12174 // CHECK11-NEXT:    [[TMP223:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 0
12175 // CHECK11-NEXT:    store i8* null, i8** [[TMP223]], align 4
12176 // CHECK11-NEXT:    [[TMP224:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 1
12177 // CHECK11-NEXT:    [[TMP225:%.*]] = bitcast i8** [[TMP224]] to i32*
12178 // CHECK11-NEXT:    store i32 [[TMP215]], i32* [[TMP225]], align 4
12179 // CHECK11-NEXT:    [[TMP226:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 1
12180 // CHECK11-NEXT:    [[TMP227:%.*]] = bitcast i8** [[TMP226]] to i32*
12181 // CHECK11-NEXT:    store i32 [[TMP215]], i32* [[TMP227]], align 4
12182 // CHECK11-NEXT:    [[TMP228:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 1
12183 // CHECK11-NEXT:    store i8* null, i8** [[TMP228]], align 4
12184 // CHECK11-NEXT:    [[TMP229:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 2
12185 // CHECK11-NEXT:    [[TMP230:%.*]] = bitcast i8** [[TMP229]] to i32**
12186 // CHECK11-NEXT:    store i32* [[TMP216]], i32** [[TMP230]], align 4
12187 // CHECK11-NEXT:    [[TMP231:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 2
12188 // CHECK11-NEXT:    [[TMP232:%.*]] = bitcast i8** [[TMP231]] to i32**
12189 // CHECK11-NEXT:    store i32* [[TMP216]], i32** [[TMP232]], align 4
12190 // CHECK11-NEXT:    [[TMP233:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 2
12191 // CHECK11-NEXT:    store i8* null, i8** [[TMP233]], align 4
12192 // CHECK11-NEXT:    [[TMP234:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 3
12193 // CHECK11-NEXT:    [[TMP235:%.*]] = bitcast i8** [[TMP234]] to i32**
12194 // CHECK11-NEXT:    store i32* [[TMP217]], i32** [[TMP235]], align 4
12195 // CHECK11-NEXT:    [[TMP236:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 3
12196 // CHECK11-NEXT:    [[TMP237:%.*]] = bitcast i8** [[TMP236]] to i32**
12197 // CHECK11-NEXT:    store i32* [[TMP217]], i32** [[TMP237]], align 4
12198 // CHECK11-NEXT:    [[TMP238:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 3
12199 // CHECK11-NEXT:    store i8* null, i8** [[TMP238]], align 4
12200 // CHECK11-NEXT:    [[TMP239:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 4
12201 // CHECK11-NEXT:    [[TMP240:%.*]] = bitcast i8** [[TMP239]] to i32**
12202 // CHECK11-NEXT:    store i32* [[TMP218]], i32** [[TMP240]], align 4
12203 // CHECK11-NEXT:    [[TMP241:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 4
12204 // CHECK11-NEXT:    [[TMP242:%.*]] = bitcast i8** [[TMP241]] to i32**
12205 // CHECK11-NEXT:    store i32* [[TMP218]], i32** [[TMP242]], align 4
12206 // CHECK11-NEXT:    [[TMP243:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS73]], i32 0, i32 4
12207 // CHECK11-NEXT:    store i8* null, i8** [[TMP243]], align 4
12208 // CHECK11-NEXT:    [[TMP244:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS71]], i32 0, i32 0
12209 // CHECK11-NEXT:    [[TMP245:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS72]], i32 0, i32 0
12210 // CHECK11-NEXT:    [[TMP246:%.*]] = load i32, i32* [[N]], align 4
12211 // CHECK11-NEXT:    store i32 [[TMP246]], i32* [[DOTCAPTURE_EXPR_75]], align 4
12212 // CHECK11-NEXT:    [[TMP247:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_75]], align 4
12213 // CHECK11-NEXT:    [[SUB77:%.*]] = sub nsw i32 [[TMP247]], 0
12214 // CHECK11-NEXT:    [[DIV78:%.*]] = sdiv i32 [[SUB77]], 1
12215 // CHECK11-NEXT:    [[SUB79:%.*]] = sub nsw i32 [[DIV78]], 1
12216 // CHECK11-NEXT:    store i32 [[SUB79]], i32* [[DOTCAPTURE_EXPR_76]], align 4
12217 // CHECK11-NEXT:    [[TMP248:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
12218 // CHECK11-NEXT:    [[ADD80:%.*]] = add nsw i32 [[TMP248]], 1
12219 // CHECK11-NEXT:    [[TMP249:%.*]] = zext i32 [[ADD80]] to i64
12220 // CHECK11-NEXT:    call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP249]])
12221 // CHECK11-NEXT:    [[TMP250:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90.region_id, i32 5, i8** [[TMP244]], i8** [[TMP245]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.52, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.53, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
12222 // CHECK11-NEXT:    [[TMP251:%.*]] = icmp ne i32 [[TMP250]], 0
12223 // CHECK11-NEXT:    br i1 [[TMP251]], label [[OMP_OFFLOAD_FAILED81:%.*]], label [[OMP_OFFLOAD_CONT82:%.*]]
12224 // CHECK11:       omp_offload.failed81:
12225 // CHECK11-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90(i32 [[TMP213]], i32 [[TMP215]], i32* [[TMP216]], i32* [[TMP217]], i32* [[TMP218]]) #[[ATTR2]]
12226 // CHECK11-NEXT:    br label [[OMP_OFFLOAD_CONT82]]
12227 // CHECK11:       omp_offload.cont82:
12228 // CHECK11-NEXT:    ret i32 0
12229 //
12230 //
12231 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l42
12232 // CHECK11-SAME: (i32 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
12233 // CHECK11-NEXT:  entry:
12234 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12235 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
12236 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
12237 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
12238 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12239 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
12240 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
12241 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
12242 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
12243 // CHECK11-NEXT:    ret void
12244 //
12245 //
12246 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..26
12247 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
12248 // CHECK11-NEXT:  entry:
12249 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12250 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12251 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
12252 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
12253 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
12254 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
12255 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12256 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12257 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12258 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12259 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
12260 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12261 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12262 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12263 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12264 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
12265 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12266 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12267 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
12268 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
12269 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
12270 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
12271 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
12272 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
12273 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
12274 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
12275 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12276 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12277 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12278 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12279 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12280 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12281 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12282 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
12283 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12284 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12285 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12286 // CHECK11:       omp.precond.then:
12287 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12288 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12289 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
12290 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12291 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12292 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12293 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
12294 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12295 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12296 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12297 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
12298 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12299 // CHECK11:       cond.true:
12300 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12301 // CHECK11-NEXT:    br label [[COND_END:%.*]]
12302 // CHECK11:       cond.false:
12303 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12304 // CHECK11-NEXT:    br label [[COND_END]]
12305 // CHECK11:       cond.end:
12306 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
12307 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12308 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12309 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
12310 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12311 // CHECK11:       omp.inner.for.cond:
12312 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
12313 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !63
12314 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
12315 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12316 // CHECK11:       omp.inner.for.body:
12317 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !63
12318 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !63
12319 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !63
12320 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12321 // CHECK11:       omp.inner.for.inc:
12322 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
12323 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !63
12324 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
12325 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63
12326 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP64:![0-9]+]]
12327 // CHECK11:       omp.inner.for.end:
12328 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12329 // CHECK11:       omp.loop.exit:
12330 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12331 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
12332 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
12333 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12334 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
12335 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12336 // CHECK11:       .omp.final.then:
12337 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12338 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
12339 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
12340 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
12341 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
12342 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
12343 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12344 // CHECK11:       .omp.final.done:
12345 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
12346 // CHECK11:       omp.precond.end:
12347 // CHECK11-NEXT:    ret void
12348 //
12349 //
12350 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..27
12351 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
12352 // CHECK11-NEXT:  entry:
12353 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12354 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12355 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
12356 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
12357 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
12358 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
12359 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
12360 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
12361 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12362 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12363 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12364 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12365 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
12366 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12367 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12368 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12369 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12370 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
12371 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12372 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12373 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
12374 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12375 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
12376 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
12377 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
12378 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
12379 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
12380 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
12381 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
12382 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
12383 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12384 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12385 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12386 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12387 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12388 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12389 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12390 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
12391 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12392 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12393 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12394 // CHECK11:       omp.precond.then:
12395 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12396 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12397 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
12398 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
12399 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12400 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
12401 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
12402 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12403 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12404 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12405 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12406 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12407 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12408 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12409 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
12410 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12411 // CHECK11:       cond.true:
12412 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12413 // CHECK11-NEXT:    br label [[COND_END:%.*]]
12414 // CHECK11:       cond.false:
12415 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12416 // CHECK11-NEXT:    br label [[COND_END]]
12417 // CHECK11:       cond.end:
12418 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
12419 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12420 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12421 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
12422 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12423 // CHECK11:       omp.inner.for.cond:
12424 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
12425 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !66
12426 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
12427 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12428 // CHECK11:       omp.inner.for.body:
12429 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
12430 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
12431 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12432 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !66
12433 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !66
12434 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
12435 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
12436 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !66
12437 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !66
12438 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
12439 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
12440 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !66
12441 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
12442 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !66
12443 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !66
12444 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
12445 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !66
12446 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12447 // CHECK11:       omp.body.continue:
12448 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12449 // CHECK11:       omp.inner.for.inc:
12450 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
12451 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
12452 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !66
12453 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP67:![0-9]+]]
12454 // CHECK11:       omp.inner.for.end:
12455 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12456 // CHECK11:       omp.loop.exit:
12457 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12458 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
12459 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
12460 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12461 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
12462 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12463 // CHECK11:       .omp.final.then:
12464 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12465 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
12466 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
12467 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
12468 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
12469 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
12470 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12471 // CHECK11:       .omp.final.done:
12472 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
12473 // CHECK11:       omp.precond.end:
12474 // CHECK11-NEXT:    ret void
12475 //
12476 //
12477 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l50
12478 // CHECK11-SAME: (i32 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
12479 // CHECK11-NEXT:  entry:
12480 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12481 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
12482 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
12483 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
12484 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12485 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
12486 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
12487 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
12488 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
12489 // CHECK11-NEXT:    ret void
12490 //
12491 //
12492 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..30
12493 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
12494 // CHECK11-NEXT:  entry:
12495 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12496 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12497 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
12498 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
12499 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
12500 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
12501 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12502 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12503 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12504 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12505 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
12506 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12507 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12508 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12509 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12510 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
12511 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12512 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12513 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
12514 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
12515 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
12516 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
12517 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
12518 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
12519 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
12520 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
12521 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12522 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12523 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12524 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12525 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12526 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12527 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12528 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
12529 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12530 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12531 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12532 // CHECK11:       omp.precond.then:
12533 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12534 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12535 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
12536 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12537 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12538 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12539 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
12540 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12541 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12542 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12543 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
12544 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12545 // CHECK11:       cond.true:
12546 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12547 // CHECK11-NEXT:    br label [[COND_END:%.*]]
12548 // CHECK11:       cond.false:
12549 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12550 // CHECK11-NEXT:    br label [[COND_END]]
12551 // CHECK11:       cond.end:
12552 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
12553 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12554 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12555 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
12556 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12557 // CHECK11:       omp.inner.for.cond:
12558 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
12559 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !69
12560 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
12561 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12562 // CHECK11:       omp.inner.for.body:
12563 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !69
12564 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !69
12565 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !69
12566 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12567 // CHECK11:       omp.inner.for.inc:
12568 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
12569 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !69
12570 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
12571 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !69
12572 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP70:![0-9]+]]
12573 // CHECK11:       omp.inner.for.end:
12574 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12575 // CHECK11:       omp.loop.exit:
12576 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12577 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
12578 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
12579 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12580 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
12581 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12582 // CHECK11:       .omp.final.then:
12583 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12584 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
12585 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
12586 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
12587 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
12588 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
12589 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12590 // CHECK11:       .omp.final.done:
12591 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
12592 // CHECK11:       omp.precond.end:
12593 // CHECK11-NEXT:    ret void
12594 //
12595 //
12596 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..31
12597 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
12598 // CHECK11-NEXT:  entry:
12599 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12600 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12601 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
12602 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
12603 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
12604 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
12605 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
12606 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
12607 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12608 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12609 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12610 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12611 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
12612 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12613 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12614 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12615 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12616 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
12617 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12618 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12619 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
12620 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12621 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
12622 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
12623 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
12624 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
12625 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
12626 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
12627 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
12628 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
12629 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12630 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12631 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12632 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12633 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12634 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12635 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12636 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
12637 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12638 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12639 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12640 // CHECK11:       omp.precond.then:
12641 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12642 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12643 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
12644 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
12645 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12646 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
12647 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
12648 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12649 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12650 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12651 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12652 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12653 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12654 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12655 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
12656 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12657 // CHECK11:       cond.true:
12658 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12659 // CHECK11-NEXT:    br label [[COND_END:%.*]]
12660 // CHECK11:       cond.false:
12661 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12662 // CHECK11-NEXT:    br label [[COND_END]]
12663 // CHECK11:       cond.end:
12664 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
12665 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12666 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12667 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
12668 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12669 // CHECK11:       omp.inner.for.cond:
12670 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
12671 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !72
12672 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
12673 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12674 // CHECK11:       omp.inner.for.body:
12675 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
12676 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
12677 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12678 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !72
12679 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !72
12680 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
12681 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
12682 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !72
12683 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !72
12684 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
12685 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
12686 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !72
12687 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
12688 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !72
12689 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !72
12690 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
12691 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !72
12692 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12693 // CHECK11:       omp.body.continue:
12694 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12695 // CHECK11:       omp.inner.for.inc:
12696 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
12697 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
12698 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !72
12699 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP73:![0-9]+]]
12700 // CHECK11:       omp.inner.for.end:
12701 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12702 // CHECK11:       omp.loop.exit:
12703 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12704 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
12705 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
12706 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12707 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
12708 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12709 // CHECK11:       .omp.final.then:
12710 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12711 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
12712 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
12713 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
12714 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
12715 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
12716 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12717 // CHECK11:       .omp.final.done:
12718 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
12719 // CHECK11:       omp.precond.end:
12720 // CHECK11-NEXT:    ret void
12721 //
12722 //
12723 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l58
12724 // CHECK11-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
12725 // CHECK11-NEXT:  entry:
12726 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
12727 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
12728 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
12729 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
12730 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
12731 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
12732 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
12733 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
12734 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
12735 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
12736 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..34 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
12737 // CHECK11-NEXT:    ret void
12738 //
12739 //
12740 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..34
12741 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
12742 // CHECK11-NEXT:  entry:
12743 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12744 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12745 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
12746 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
12747 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
12748 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
12749 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
12750 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12751 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12752 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12753 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12754 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
12755 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12756 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12757 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12758 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12759 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
12760 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12761 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12762 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
12763 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
12764 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
12765 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
12766 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
12767 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
12768 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
12769 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
12770 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
12771 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
12772 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP1]], align 4
12773 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
12774 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12775 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0
12776 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12777 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12778 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12779 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
12780 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12781 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP7]]
12782 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12783 // CHECK11:       omp.precond.then:
12784 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
12785 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12786 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_COMB_UB]], align 4
12787 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12788 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12789 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
12790 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12791 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12792 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP9]])
12793 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12794 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12795 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
12796 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12797 // CHECK11:       cond.true:
12798 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12799 // CHECK11-NEXT:    br label [[COND_END:%.*]]
12800 // CHECK11:       cond.false:
12801 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
12802 // CHECK11-NEXT:    br label [[COND_END]]
12803 // CHECK11:       cond.end:
12804 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
12805 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
12806 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
12807 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
12808 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12809 // CHECK11:       omp.inner.for.cond:
12810 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
12811 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
12812 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP18]], 1
12813 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp slt i32 [[TMP17]], [[ADD]]
12814 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12815 // CHECK11:       omp.inner.for.body:
12816 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
12817 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
12818 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]]), !llvm.access.group !75
12819 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12820 // CHECK11:       omp.inner.for.inc:
12821 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
12822 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
12823 // CHECK11-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
12824 // CHECK11-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
12825 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
12826 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
12827 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
12828 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
12829 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
12830 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !75
12831 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
12832 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
12833 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
12834 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
12835 // CHECK11-NEXT:    [[CMP9:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
12836 // CHECK11-NEXT:    br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
12837 // CHECK11:       cond.true10:
12838 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group !75
12839 // CHECK11-NEXT:    br label [[COND_END12:%.*]]
12840 // CHECK11:       cond.false11:
12841 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
12842 // CHECK11-NEXT:    br label [[COND_END12]]
12843 // CHECK11:       cond.end12:
12844 // CHECK11-NEXT:    [[COND13:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE10]] ], [ [[TMP30]], [[COND_FALSE11]] ]
12845 // CHECK11-NEXT:    store i32 [[COND13]], i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !75
12846 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !75
12847 // CHECK11-NEXT:    store i32 [[TMP31]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !75
12848 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP76:![0-9]+]]
12849 // CHECK11:       omp.inner.for.end:
12850 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12851 // CHECK11:       omp.loop.exit:
12852 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12853 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
12854 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
12855 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12856 // CHECK11-NEXT:    [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
12857 // CHECK11-NEXT:    br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12858 // CHECK11:       .omp.final.then:
12859 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12860 // CHECK11-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP36]], 0
12861 // CHECK11-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
12862 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV15]], 1
12863 // CHECK11-NEXT:    [[ADD16:%.*]] = add nsw i32 0, [[MUL]]
12864 // CHECK11-NEXT:    store i32 [[ADD16]], i32* [[I3]], align 4
12865 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12866 // CHECK11:       .omp.final.done:
12867 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
12868 // CHECK11:       omp.precond.end:
12869 // CHECK11-NEXT:    ret void
12870 //
12871 //
12872 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..35
12873 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
12874 // CHECK11-NEXT:  entry:
12875 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
12876 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
12877 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
12878 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
12879 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
12880 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
12881 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
12882 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
12883 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
12884 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
12885 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12886 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12887 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
12888 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
12889 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
12890 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12891 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12892 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
12893 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
12894 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
12895 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
12896 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12897 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
12898 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
12899 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
12900 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
12901 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
12902 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
12903 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
12904 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
12905 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
12906 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
12907 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12908 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
12909 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12910 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
12911 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
12912 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
12913 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12914 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
12915 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12916 // CHECK11:       omp.precond.then:
12917 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
12918 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12919 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
12920 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
12921 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
12922 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
12923 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
12924 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
12925 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
12926 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12927 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
12928 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
12929 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12930 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12931 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
12932 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12933 // CHECK11:       cond.true:
12934 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
12935 // CHECK11-NEXT:    br label [[COND_END:%.*]]
12936 // CHECK11:       cond.false:
12937 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
12938 // CHECK11-NEXT:    br label [[COND_END]]
12939 // CHECK11:       cond.end:
12940 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
12941 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
12942 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
12943 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
12944 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
12945 // CHECK11:       omp.inner.for.cond:
12946 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
12947 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !78
12948 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
12949 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12950 // CHECK11:       omp.inner.for.body:
12951 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
12952 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
12953 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12954 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !78
12955 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !78
12956 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
12957 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
12958 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !78
12959 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !78
12960 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
12961 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
12962 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !78
12963 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
12964 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !78
12965 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !78
12966 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
12967 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !78
12968 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
12969 // CHECK11:       omp.body.continue:
12970 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
12971 // CHECK11:       omp.inner.for.inc:
12972 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
12973 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
12974 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !78
12975 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP79:![0-9]+]]
12976 // CHECK11:       omp.inner.for.end:
12977 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
12978 // CHECK11:       omp.loop.exit:
12979 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
12980 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
12981 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
12982 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
12983 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
12984 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12985 // CHECK11:       .omp.final.then:
12986 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
12987 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
12988 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
12989 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
12990 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
12991 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
12992 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
12993 // CHECK11:       .omp.final.done:
12994 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
12995 // CHECK11:       omp.precond.end:
12996 // CHECK11-NEXT:    ret void
12997 //
12998 //
12999 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66
13000 // CHECK11-SAME: (i32 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
13001 // CHECK11-NEXT:  entry:
13002 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13003 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
13004 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
13005 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
13006 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13007 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
13008 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
13009 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
13010 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..38 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
13011 // CHECK11-NEXT:    ret void
13012 //
13013 //
13014 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..38
13015 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
13016 // CHECK11-NEXT:  entry:
13017 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13018 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13019 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
13020 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
13021 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
13022 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
13023 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13024 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13025 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13026 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13027 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
13028 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13029 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13030 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13031 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13032 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
13033 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13034 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13035 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
13036 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
13037 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
13038 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
13039 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13040 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13041 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13042 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13043 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13044 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
13045 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13046 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13047 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13048 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13049 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13050 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
13051 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13052 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13053 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13054 // CHECK11:       omp.precond.then:
13055 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13056 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13057 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
13058 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13059 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13060 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13061 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
13062 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13063 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13064 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13065 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
13066 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13067 // CHECK11:       cond.true:
13068 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13069 // CHECK11-NEXT:    br label [[COND_END:%.*]]
13070 // CHECK11:       cond.false:
13071 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13072 // CHECK11-NEXT:    br label [[COND_END]]
13073 // CHECK11:       cond.end:
13074 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
13075 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13076 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13077 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
13078 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13079 // CHECK11:       omp.inner.for.cond:
13080 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
13081 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !81
13082 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
13083 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13084 // CHECK11:       omp.inner.for.body:
13085 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !81
13086 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !81
13087 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..39 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !81
13088 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13089 // CHECK11:       omp.inner.for.inc:
13090 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
13091 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !81
13092 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
13093 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !81
13094 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP82:![0-9]+]]
13095 // CHECK11:       omp.inner.for.end:
13096 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13097 // CHECK11:       omp.loop.exit:
13098 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13099 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
13100 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
13101 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13102 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
13103 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13104 // CHECK11:       .omp.final.then:
13105 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13106 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
13107 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
13108 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
13109 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
13110 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
13111 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13112 // CHECK11:       .omp.final.done:
13113 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
13114 // CHECK11:       omp.precond.end:
13115 // CHECK11-NEXT:    ret void
13116 //
13117 //
13118 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..39
13119 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
13120 // CHECK11-NEXT:  entry:
13121 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13122 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13123 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13124 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13125 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
13126 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
13127 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
13128 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
13129 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13130 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13131 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13132 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13133 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
13134 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13135 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13136 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13137 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13138 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
13139 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13140 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13141 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13142 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13143 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
13144 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
13145 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
13146 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
13147 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13148 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13149 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13150 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13151 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13152 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
13153 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13154 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13155 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13156 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13157 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13158 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
13159 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13160 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13161 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13162 // CHECK11:       omp.precond.then:
13163 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13164 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13165 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
13166 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13167 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13168 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
13169 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
13170 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13171 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13172 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13173 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
13174 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13175 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13176 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13177 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
13178 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13179 // CHECK11:       cond.true:
13180 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13181 // CHECK11-NEXT:    br label [[COND_END:%.*]]
13182 // CHECK11:       cond.false:
13183 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13184 // CHECK11-NEXT:    br label [[COND_END]]
13185 // CHECK11:       cond.end:
13186 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
13187 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
13188 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13189 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
13190 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13191 // CHECK11:       omp.inner.for.cond:
13192 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
13193 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !84
13194 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
13195 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13196 // CHECK11:       omp.inner.for.body:
13197 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
13198 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
13199 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13200 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !84
13201 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !84
13202 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
13203 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP20]], i32 [[TMP21]]
13204 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !84
13205 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !84
13206 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
13207 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
13208 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !84
13209 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP25]]
13210 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !84
13211 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !84
13212 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
13213 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !84
13214 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13215 // CHECK11:       omp.body.continue:
13216 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13217 // CHECK11:       omp.inner.for.inc:
13218 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
13219 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP28]], 1
13220 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !84
13221 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP85:![0-9]+]]
13222 // CHECK11:       omp.inner.for.end:
13223 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13224 // CHECK11:       omp.loop.exit:
13225 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13226 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
13227 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
13228 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13229 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
13230 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13231 // CHECK11:       .omp.final.then:
13232 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13233 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
13234 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
13235 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
13236 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
13237 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I3]], align 4
13238 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13239 // CHECK11:       .omp.final.done:
13240 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
13241 // CHECK11:       omp.precond.end:
13242 // CHECK11-NEXT:    ret void
13243 //
13244 //
13245 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l74
13246 // CHECK11-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
13247 // CHECK11-NEXT:  entry:
13248 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
13249 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13250 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
13251 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
13252 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
13253 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
13254 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13255 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
13256 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
13257 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
13258 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..42 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
13259 // CHECK11-NEXT:    ret void
13260 //
13261 //
13262 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..42
13263 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
13264 // CHECK11-NEXT:  entry:
13265 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13266 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13267 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
13268 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
13269 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
13270 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
13271 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
13272 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13273 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13274 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13275 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13276 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13277 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
13278 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13279 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13280 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13281 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13282 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
13283 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
13284 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13285 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13286 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
13287 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
13288 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
13289 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
13290 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
13291 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
13292 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13293 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13294 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13295 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13296 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
13297 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
13298 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
13299 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13300 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13301 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
13302 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13303 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
13304 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
13305 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
13306 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13307 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
13308 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13309 // CHECK11:       omp.precond.then:
13310 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13311 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13312 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
13313 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13314 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13315 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13316 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
13317 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13318 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13319 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13320 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
13321 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13322 // CHECK11:       cond.true:
13323 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13324 // CHECK11-NEXT:    br label [[COND_END:%.*]]
13325 // CHECK11:       cond.false:
13326 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13327 // CHECK11-NEXT:    br label [[COND_END]]
13328 // CHECK11:       cond.end:
13329 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
13330 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13331 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13332 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
13333 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13334 // CHECK11:       omp.inner.for.cond:
13335 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
13336 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !87
13337 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
13338 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13339 // CHECK11:       omp.inner.for.body:
13340 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !87
13341 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !87
13342 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !87
13343 // CHECK11-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !87
13344 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !87
13345 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**, i32)* @.omp_outlined..43 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !87
13346 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13347 // CHECK11:       omp.inner.for.inc:
13348 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
13349 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !87
13350 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
13351 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !87
13352 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP88:![0-9]+]]
13353 // CHECK11:       omp.inner.for.end:
13354 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13355 // CHECK11:       omp.loop.exit:
13356 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13357 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
13358 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
13359 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13360 // CHECK11-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
13361 // CHECK11-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13362 // CHECK11:       .omp.final.then:
13363 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13364 // CHECK11-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
13365 // CHECK11-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
13366 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
13367 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
13368 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
13369 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13370 // CHECK11:       .omp.final.done:
13371 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
13372 // CHECK11:       omp.precond.end:
13373 // CHECK11-NEXT:    ret void
13374 //
13375 //
13376 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..43
13377 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
13378 // CHECK11-NEXT:  entry:
13379 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13380 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13381 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13382 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13383 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
13384 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
13385 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
13386 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
13387 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
13388 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13389 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13390 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13391 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13392 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
13393 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13394 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13395 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13396 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13397 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
13398 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13399 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13400 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13401 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13402 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
13403 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
13404 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
13405 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
13406 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
13407 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13408 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13409 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13410 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13411 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13412 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13413 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13414 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13415 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13416 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
13417 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
13418 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
13419 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13420 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13421 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13422 // CHECK11:       omp.precond.then:
13423 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13424 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13425 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
13426 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13427 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13428 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
13429 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
13430 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13431 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13432 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
13433 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13434 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
13435 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP12]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP10]])
13436 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
13437 // CHECK11:       omp.dispatch.cond:
13438 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13439 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13440 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]]
13441 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13442 // CHECK11:       cond.true:
13443 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13444 // CHECK11-NEXT:    br label [[COND_END:%.*]]
13445 // CHECK11:       cond.false:
13446 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13447 // CHECK11-NEXT:    br label [[COND_END]]
13448 // CHECK11:       cond.end:
13449 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ]
13450 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
13451 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13452 // CHECK11-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
13453 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
13454 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13455 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
13456 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13457 // CHECK11:       omp.dispatch.body:
13458 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13459 // CHECK11:       omp.inner.for.cond:
13460 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
13461 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !90
13462 // CHECK11-NEXT:    [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
13463 // CHECK11-NEXT:    br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13464 // CHECK11:       omp.inner.for.body:
13465 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
13466 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
13467 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13468 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !90
13469 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !90
13470 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
13471 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]]
13472 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !90
13473 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !90
13474 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
13475 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
13476 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4, !llvm.access.group !90
13477 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP25]], [[TMP28]]
13478 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !90
13479 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !90
13480 // CHECK11-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 [[TMP30]]
13481 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[ARRAYIDX10]], align 4, !llvm.access.group !90
13482 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13483 // CHECK11:       omp.body.continue:
13484 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13485 // CHECK11:       omp.inner.for.inc:
13486 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
13487 // CHECK11-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP31]], 1
13488 // CHECK11-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !90
13489 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP91:![0-9]+]]
13490 // CHECK11:       omp.inner.for.end:
13491 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
13492 // CHECK11:       omp.dispatch.inc:
13493 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13494 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13495 // CHECK11-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
13496 // CHECK11-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_LB]], align 4
13497 // CHECK11-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13498 // CHECK11-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
13499 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
13500 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[DOTOMP_UB]], align 4
13501 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
13502 // CHECK11:       omp.dispatch.end:
13503 // CHECK11-NEXT:    [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13504 // CHECK11-NEXT:    [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
13505 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP37]])
13506 // CHECK11-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13507 // CHECK11-NEXT:    [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
13508 // CHECK11-NEXT:    br i1 [[TMP39]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13509 // CHECK11:       .omp.final.then:
13510 // CHECK11-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13511 // CHECK11-NEXT:    [[SUB14:%.*]] = sub nsw i32 [[TMP40]], 0
13512 // CHECK11-NEXT:    [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
13513 // CHECK11-NEXT:    [[MUL16:%.*]] = mul nsw i32 [[DIV15]], 1
13514 // CHECK11-NEXT:    [[ADD17:%.*]] = add nsw i32 0, [[MUL16]]
13515 // CHECK11-NEXT:    store i32 [[ADD17]], i32* [[I4]], align 4
13516 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13517 // CHECK11:       .omp.final.done:
13518 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
13519 // CHECK11:       omp.precond.end:
13520 // CHECK11-NEXT:    ret void
13521 //
13522 //
13523 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l82
13524 // CHECK11-SAME: (i32 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
13525 // CHECK11-NEXT:  entry:
13526 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13527 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
13528 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
13529 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
13530 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13531 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
13532 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
13533 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
13534 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..46 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
13535 // CHECK11-NEXT:    ret void
13536 //
13537 //
13538 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..46
13539 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
13540 // CHECK11-NEXT:  entry:
13541 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13542 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13543 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
13544 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
13545 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
13546 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
13547 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13548 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13549 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13550 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13551 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
13552 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13553 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13554 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13555 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13556 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
13557 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13558 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13559 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
13560 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
13561 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
13562 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
13563 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13564 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13565 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13566 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13567 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13568 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
13569 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13570 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13571 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13572 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13573 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13574 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
13575 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13576 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13577 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13578 // CHECK11:       omp.precond.then:
13579 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13580 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13581 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_COMB_UB]], align 4
13582 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13583 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13584 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13585 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
13586 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13587 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13588 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13589 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
13590 // CHECK11-NEXT:    br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13591 // CHECK11:       cond.true:
13592 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13593 // CHECK11-NEXT:    br label [[COND_END:%.*]]
13594 // CHECK11:       cond.false:
13595 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13596 // CHECK11-NEXT:    br label [[COND_END]]
13597 // CHECK11:       cond.end:
13598 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
13599 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13600 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13601 // CHECK11-NEXT:    store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
13602 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13603 // CHECK11:       omp.inner.for.cond:
13604 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
13605 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !93
13606 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
13607 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13608 // CHECK11:       omp.inner.for.body:
13609 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !93
13610 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !93
13611 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**)* @.omp_outlined..47 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32** [[TMP1]], i32** [[TMP2]], i32** [[TMP3]]), !llvm.access.group !93
13612 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13613 // CHECK11:       omp.inner.for.inc:
13614 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
13615 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !93
13616 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
13617 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !93
13618 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP94:![0-9]+]]
13619 // CHECK11:       omp.inner.for.end:
13620 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13621 // CHECK11:       omp.loop.exit:
13622 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13623 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
13624 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
13625 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13626 // CHECK11-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
13627 // CHECK11-NEXT:    br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13628 // CHECK11:       .omp.final.then:
13629 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13630 // CHECK11-NEXT:    [[SUB6:%.*]] = sub nsw i32 [[TMP25]], 0
13631 // CHECK11-NEXT:    [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
13632 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
13633 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
13634 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[I3]], align 4
13635 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13636 // CHECK11:       .omp.final.done:
13637 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
13638 // CHECK11:       omp.precond.end:
13639 // CHECK11-NEXT:    ret void
13640 //
13641 //
13642 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..47
13643 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
13644 // CHECK11-NEXT:  entry:
13645 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13646 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13647 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13648 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13649 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
13650 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
13651 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
13652 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
13653 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13654 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13655 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13656 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13657 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
13658 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13659 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13660 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13661 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13662 // CHECK11-NEXT:    [[I3:%.*]] = alloca i32, align 4
13663 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13664 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13665 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13666 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13667 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
13668 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
13669 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
13670 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
13671 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13672 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13673 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13674 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13675 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13676 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_]], align 4
13677 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13678 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13679 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13680 // CHECK11-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13681 // CHECK11-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13682 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
13683 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13684 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13685 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13686 // CHECK11:       omp.precond.then:
13687 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13688 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13689 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
13690 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13691 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13692 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
13693 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
13694 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13695 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13696 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13697 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13698 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13699 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
13700 // CHECK11-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 1)
13701 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
13702 // CHECK11:       omp.dispatch.cond:
13703 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13704 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
13705 // CHECK11-NEXT:    [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP15]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
13706 // CHECK11-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
13707 // CHECK11-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13708 // CHECK11:       omp.dispatch.body:
13709 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13710 // CHECK11-NEXT:    store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4
13711 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13712 // CHECK11:       omp.inner.for.cond:
13713 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
13714 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !96
13715 // CHECK11-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
13716 // CHECK11-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13717 // CHECK11:       omp.inner.for.body:
13718 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
13719 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
13720 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13721 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !96
13722 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !96
13723 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
13724 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP21]], i32 [[TMP22]]
13725 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !96
13726 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !96
13727 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
13728 // CHECK11-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP24]], i32 [[TMP25]]
13729 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4, !llvm.access.group !96
13730 // CHECK11-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP23]], [[TMP26]]
13731 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !96
13732 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !96
13733 // CHECK11-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP27]], i32 [[TMP28]]
13734 // CHECK11-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX7]], align 4, !llvm.access.group !96
13735 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13736 // CHECK11:       omp.body.continue:
13737 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13738 // CHECK11:       omp.inner.for.inc:
13739 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
13740 // CHECK11-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP29]], 1
13741 // CHECK11-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !96
13742 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP97:![0-9]+]]
13743 // CHECK11:       omp.inner.for.end:
13744 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
13745 // CHECK11:       omp.dispatch.inc:
13746 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
13747 // CHECK11:       omp.dispatch.end:
13748 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13749 // CHECK11-NEXT:    [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0
13750 // CHECK11-NEXT:    br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13751 // CHECK11:       .omp.final.then:
13752 // CHECK11-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
13753 // CHECK11-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP32]], 0
13754 // CHECK11-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
13755 // CHECK11-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
13756 // CHECK11-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
13757 // CHECK11-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
13758 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13759 // CHECK11:       .omp.final.done:
13760 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
13761 // CHECK11:       omp.precond.end:
13762 // CHECK11-NEXT:    ret void
13763 //
13764 //
13765 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l90
13766 // CHECK11-SAME: (i32 noundef [[CH:%.*]], i32 noundef [[N:%.*]], i32* noundef [[A:%.*]], i32* noundef [[B:%.*]], i32* noundef [[C:%.*]]) #[[ATTR1]] {
13767 // CHECK11-NEXT:  entry:
13768 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32, align 4
13769 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
13770 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
13771 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32*, align 4
13772 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32*, align 4
13773 // CHECK11-NEXT:    store i32 [[CH]], i32* [[CH_ADDR]], align 4
13774 // CHECK11-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
13775 // CHECK11-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
13776 // CHECK11-NEXT:    store i32* [[B]], i32** [[B_ADDR]], align 4
13777 // CHECK11-NEXT:    store i32* [[C]], i32** [[C_ADDR]], align 4
13778 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*, i32**, i32**, i32**)* @.omp_outlined..50 to void (i32*, i32*, ...)*), i32* [[CH_ADDR]], i32* [[N_ADDR]], i32** [[A_ADDR]], i32** [[B_ADDR]], i32** [[C_ADDR]])
13779 // CHECK11-NEXT:    ret void
13780 //
13781 //
13782 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..50
13783 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[CH:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
13784 // CHECK11-NEXT:  entry:
13785 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13786 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13787 // CHECK11-NEXT:    [[CH_ADDR:%.*]] = alloca i32*, align 4
13788 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
13789 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
13790 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
13791 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
13792 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13793 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13794 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13795 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13796 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13797 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
13798 // CHECK11-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13799 // CHECK11-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13800 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13801 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13802 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
13803 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
13804 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13805 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13806 // CHECK11-NEXT:    store i32* [[CH]], i32** [[CH_ADDR]], align 4
13807 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
13808 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
13809 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
13810 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
13811 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[CH_ADDR]], align 4
13812 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13813 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13814 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13815 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13816 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
13817 // CHECK11-NEXT:    store i32 [[TMP5]], i32* [[DOTCAPTURE_EXPR_]], align 4
13818 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4
13819 // CHECK11-NEXT:    store i32 [[TMP6]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13820 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13821 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0
13822 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13823 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
13824 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
13825 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
13826 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13827 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP8]]
13828 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13829 // CHECK11:       omp.precond.then:
13830 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
13831 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13832 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_COMB_UB]], align 4
13833 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13834 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13835 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13836 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
13837 // CHECK11-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
13838 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13839 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13840 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sgt i32 [[TMP12]], [[TMP13]]
13841 // CHECK11-NEXT:    br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13842 // CHECK11:       cond.true:
13843 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13844 // CHECK11-NEXT:    br label [[COND_END:%.*]]
13845 // CHECK11:       cond.false:
13846 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
13847 // CHECK11-NEXT:    br label [[COND_END]]
13848 // CHECK11:       cond.end:
13849 // CHECK11-NEXT:    [[COND:%.*]] = phi i32 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ]
13850 // CHECK11-NEXT:    store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
13851 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
13852 // CHECK11-NEXT:    store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
13853 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13854 // CHECK11:       omp.inner.for.cond:
13855 // CHECK11-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
13856 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !99
13857 // CHECK11-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
13858 // CHECK11-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13859 // CHECK11:       omp.inner.for.body:
13860 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !99
13861 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !99
13862 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !99
13863 // CHECK11-NEXT:    store i32 [[TMP21]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !99
13864 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !99
13865 // CHECK11-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 7, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32**, i32**, i32**, i32)* @.omp_outlined..51 to void (i32*, i32*, ...)*), i32 [[TMP19]], i32 [[TMP20]], i32* [[TMP1]], i32** [[TMP2]], i32** [[TMP3]], i32** [[TMP4]], i32 [[TMP22]]), !llvm.access.group !99
13866 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13867 // CHECK11:       omp.inner.for.inc:
13868 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
13869 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !99
13870 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
13871 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !99
13872 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP100:![0-9]+]]
13873 // CHECK11:       omp.inner.for.end:
13874 // CHECK11-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
13875 // CHECK11:       omp.loop.exit:
13876 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13877 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
13878 // CHECK11-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
13879 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
13880 // CHECK11-NEXT:    [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
13881 // CHECK11-NEXT:    br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13882 // CHECK11:       .omp.final.then:
13883 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13884 // CHECK11-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[TMP29]], 0
13885 // CHECK11-NEXT:    [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
13886 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
13887 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
13888 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[I4]], align 4
13889 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
13890 // CHECK11:       .omp.final.done:
13891 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
13892 // CHECK11:       omp.precond.end:
13893 // CHECK11-NEXT:    ret void
13894 //
13895 //
13896 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..51
13897 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32** noundef nonnull align 4 dereferenceable(4) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
13898 // CHECK11-NEXT:  entry:
13899 // CHECK11-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
13900 // CHECK11-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
13901 // CHECK11-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13902 // CHECK11-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13903 // CHECK11-NEXT:    [[N_ADDR:%.*]] = alloca i32*, align 4
13904 // CHECK11-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 4
13905 // CHECK11-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 4
13906 // CHECK11-NEXT:    [[C_ADDR:%.*]] = alloca i32**, align 4
13907 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
13908 // CHECK11-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
13909 // CHECK11-NEXT:    [[TMP:%.*]] = alloca i32, align 4
13910 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13911 // CHECK11-NEXT:    [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
13912 // CHECK11-NEXT:    [[I:%.*]] = alloca i32, align 4
13913 // CHECK11-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
13914 // CHECK11-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
13915 // CHECK11-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13916 // CHECK11-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13917 // CHECK11-NEXT:    [[I4:%.*]] = alloca i32, align 4
13918 // CHECK11-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
13919 // CHECK11-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
13920 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13921 // CHECK11-NEXT:    store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13922 // CHECK11-NEXT:    store i32* [[N]], i32** [[N_ADDR]], align 4
13923 // CHECK11-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 4
13924 // CHECK11-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 4
13925 // CHECK11-NEXT:    store i32** [[C]], i32*** [[C_ADDR]], align 4
13926 // CHECK11-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
13927 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4
13928 // CHECK11-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[A_ADDR]], align 4
13929 // CHECK11-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[B_ADDR]], align 4
13930 // CHECK11-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[C_ADDR]], align 4
13931 // CHECK11-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
13932 // CHECK11-NEXT:    store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR_1]], align 4
13933 // CHECK11-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13934 // CHECK11-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
13935 // CHECK11-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13936 // CHECK11-NEXT:    [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
13937 // CHECK11-NEXT:    store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
13938 // CHECK11-NEXT:    store i32 0, i32* [[I]], align 4
13939 // CHECK11-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
13940 // CHECK11-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
13941 // CHECK11-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13942 // CHECK11:       omp.precond.then:
13943 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
13944 // CHECK11-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
13945 // CHECK11-NEXT:    store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
13946 // CHECK11-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
13947 // CHECK11-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
13948 // CHECK11-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_LB]], align 4
13949 // CHECK11-NEXT:    store i32 [[TMP9]], i32* [[DOTOMP_UB]], align 4
13950 // CHECK11-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
13951 // CHECK11-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
13952 // CHECK11-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
13953 // CHECK11-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13954 // CHECK11-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
13955 // CHECK11-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13956 // CHECK11-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
13957 // CHECK11-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 35, i32 [[TMP11]], i32 [[TMP12]], i32 1, i32 [[TMP10]])
13958 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
13959 // CHECK11:       omp.dispatch.cond:
13960 // CHECK11-NEXT:    [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
13961 // CHECK11-NEXT:    [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
13962 // CHECK11-NEXT:    [[TMP17:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
13963 // CHECK11-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP17]], 0
13964 // CHECK11-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13965 // CHECK11:       omp.dispatch.body:
13966 // CHECK11-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
13967 // CHECK11-NEXT:    store i32 [[TMP18]], i32* [[DOTOMP_IV]], align 4
13968 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
13969 // CHECK11:       omp.inner.for.cond:
13970 // CHECK11-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
13971 // CHECK11-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !102
13972 // CHECK11-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
13973 // CHECK11-NEXT:    br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13974 // CHECK11:       omp.inner.for.body:
13975 // CHECK11-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
13976 // CHECK11-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
13977 // CHECK11-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13978 // CHECK11-NEXT:    store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !102
13979 // CHECK11-NEXT:    [[TMP22:%.*]] = load i32*, i32** [[TMP2]], align 4, !llvm.access.group !102
13980 // CHECK11-NEXT:    [[TMP23:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
13981 // CHECK11-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP22]], i32 [[TMP23]]
13982 // CHECK11-NEXT:    [[TMP24:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !102
13983 // CHECK11-NEXT:    [[TMP25:%.*]] = load i32*, i32** [[TMP3]], align 4, !llvm.access.group !102
13984 // CHECK11-NEXT:    [[TMP26:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
13985 // CHECK11-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP25]], i32 [[TMP26]]
13986 // CHECK11-NEXT:    [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !102
13987 // CHECK11-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP24]], [[TMP27]]
13988 // CHECK11-NEXT:    [[TMP28:%.*]] = load i32*, i32** [[TMP1]], align 4, !llvm.access.group !102
13989 // CHECK11-NEXT:    [[TMP29:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !102
13990 // CHECK11-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP28]], i32 [[TMP29]]
13991 // CHECK11-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX8]], align 4, !llvm.access.group !102
13992 // CHECK11-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
13993 // CHECK11:       omp.body.continue:
13994 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
13995 // CHECK11:       omp.inner.for.inc:
13996 // CHECK11-NEXT:    [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
13997 // CHECK11-NEXT:    [[ADD9:%.*]] = add nsw i32 [[TMP30]], 1
13998 // CHECK11-NEXT:    store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !102
13999 // CHECK11-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP103:![0-9]+]]
14000 // CHECK11:       omp.inner.for.end:
14001 // CHECK11-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
14002 // CHECK11:       omp.dispatch.inc:
14003 // CHECK11-NEXT:    br label [[OMP_DISPATCH_COND]]
14004 // CHECK11:       omp.dispatch.end:
14005 // CHECK11-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
14006 // CHECK11-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
14007 // CHECK11-NEXT:    br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14008 // CHECK11:       .omp.final.then:
14009 // CHECK11-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14010 // CHECK11-NEXT:    [[SUB10:%.*]] = sub nsw i32 [[TMP33]], 0
14011 // CHECK11-NEXT:    [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
14012 // CHECK11-NEXT:    [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
14013 // CHECK11-NEXT:    [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
14014 // CHECK11-NEXT:    store i32 [[ADD13]], i32* [[I4]], align 4
14015 // CHECK11-NEXT:    br label [[DOTOMP_FINAL_DONE]]
14016 // CHECK11:       .omp.final.done:
14017 // CHECK11-NEXT:    br label [[OMP_PRECOND_END]]
14018 // CHECK11:       omp.precond.end:
14019 // CHECK11-NEXT:    ret void
14020 //
14021 //
14022 // CHECK11-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
14023 // CHECK11-SAME: () #[[ATTR4:[0-9]+]] {
14024 // CHECK11-NEXT:  entry:
14025 // CHECK11-NEXT:    call void @__tgt_register_requires(i64 1)
14026 // CHECK11-NEXT:    ret void
14027 //
14028 //
14029 // CHECK13-LABEL: define {{[^@]+}}@main
14030 // CHECK13-SAME: () #[[ATTR0:[0-9]+]] {
14031 // CHECK13-NEXT:  entry:
14032 // CHECK13-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
14033 // CHECK13-NEXT:    [[A:%.*]] = alloca double*, align 8
14034 // CHECK13-NEXT:    [[B:%.*]] = alloca double*, align 8
14035 // CHECK13-NEXT:    [[C:%.*]] = alloca double*, align 8
14036 // CHECK13-NEXT:    [[N:%.*]] = alloca i32, align 4
14037 // CHECK13-NEXT:    [[CH:%.*]] = alloca i32, align 4
14038 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14039 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14040 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14041 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14042 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14043 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
14044 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14045 // CHECK13-NEXT:    [[I3:%.*]] = alloca i32, align 4
14046 // CHECK13-NEXT:    [[_TMP15:%.*]] = alloca i32, align 4
14047 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4
14048 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
14049 // CHECK13-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
14050 // CHECK13-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
14051 // CHECK13-NEXT:    [[I23:%.*]] = alloca i32, align 4
14052 // CHECK13-NEXT:    [[DOTOMP_IV26:%.*]] = alloca i32, align 4
14053 // CHECK13-NEXT:    [[I27:%.*]] = alloca i32, align 4
14054 // CHECK13-NEXT:    [[_TMP49:%.*]] = alloca i32, align 4
14055 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
14056 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4
14057 // CHECK13-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
14058 // CHECK13-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
14059 // CHECK13-NEXT:    [[I57:%.*]] = alloca i32, align 4
14060 // CHECK13-NEXT:    [[DOTOMP_IV60:%.*]] = alloca i32, align 4
14061 // CHECK13-NEXT:    [[I61:%.*]] = alloca i32, align 4
14062 // CHECK13-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
14063 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
14064 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
14065 // CHECK13-NEXT:    [[DOTOMP_LB89:%.*]] = alloca i32, align 4
14066 // CHECK13-NEXT:    [[DOTOMP_UB90:%.*]] = alloca i32, align 4
14067 // CHECK13-NEXT:    [[I91:%.*]] = alloca i32, align 4
14068 // CHECK13-NEXT:    [[DOTOMP_IV94:%.*]] = alloca i32, align 4
14069 // CHECK13-NEXT:    [[I95:%.*]] = alloca i32, align 4
14070 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_117:%.*]] = alloca i32, align 4
14071 // CHECK13-NEXT:    [[_TMP118:%.*]] = alloca i32, align 4
14072 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_119:%.*]] = alloca i32, align 4
14073 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_120:%.*]] = alloca i32, align 4
14074 // CHECK13-NEXT:    [[DOTOMP_LB124:%.*]] = alloca i32, align 4
14075 // CHECK13-NEXT:    [[DOTOMP_UB125:%.*]] = alloca i32, align 4
14076 // CHECK13-NEXT:    [[I126:%.*]] = alloca i32, align 4
14077 // CHECK13-NEXT:    [[DOTOMP_IV129:%.*]] = alloca i32, align 4
14078 // CHECK13-NEXT:    [[I130:%.*]] = alloca i32, align 4
14079 // CHECK13-NEXT:    [[_TMP152:%.*]] = alloca i32, align 4
14080 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_153:%.*]] = alloca i32, align 4
14081 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_154:%.*]] = alloca i32, align 4
14082 // CHECK13-NEXT:    [[DOTOMP_LB158:%.*]] = alloca i32, align 4
14083 // CHECK13-NEXT:    [[DOTOMP_UB159:%.*]] = alloca i32, align 4
14084 // CHECK13-NEXT:    [[I160:%.*]] = alloca i32, align 4
14085 // CHECK13-NEXT:    [[DOTOMP_IV163:%.*]] = alloca i32, align 4
14086 // CHECK13-NEXT:    [[I164:%.*]] = alloca i32, align 4
14087 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_186:%.*]] = alloca i32, align 4
14088 // CHECK13-NEXT:    [[_TMP187:%.*]] = alloca i32, align 4
14089 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_188:%.*]] = alloca i32, align 4
14090 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_189:%.*]] = alloca i32, align 4
14091 // CHECK13-NEXT:    [[DOTOMP_LB193:%.*]] = alloca i32, align 4
14092 // CHECK13-NEXT:    [[DOTOMP_UB194:%.*]] = alloca i32, align 4
14093 // CHECK13-NEXT:    [[I195:%.*]] = alloca i32, align 4
14094 // CHECK13-NEXT:    [[DOTOMP_IV198:%.*]] = alloca i32, align 4
14095 // CHECK13-NEXT:    [[I199:%.*]] = alloca i32, align 4
14096 // CHECK13-NEXT:    store i32 0, i32* [[RETVAL]], align 4
14097 // CHECK13-NEXT:    store i32 10000, i32* [[N]], align 4
14098 // CHECK13-NEXT:    store i32 100, i32* [[CH]], align 4
14099 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
14100 // CHECK13-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
14101 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14102 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
14103 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14104 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14105 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14106 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14107 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14108 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
14109 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
14110 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14111 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
14112 // CHECK13-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
14113 // CHECK13:       simd.if.then:
14114 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14115 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
14116 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14117 // CHECK13:       omp.inner.for.cond:
14118 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14119 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
14120 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
14121 // CHECK13-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14122 // CHECK13:       omp.inner.for.body:
14123 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14124 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
14125 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14126 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !2
14127 // CHECK13-NEXT:    [[TMP8:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !2
14128 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
14129 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
14130 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP8]], i64 [[IDXPROM]]
14131 // CHECK13-NEXT:    [[TMP10:%.*]] = load double, double* [[ARRAYIDX]], align 8, !llvm.access.group !2
14132 // CHECK13-NEXT:    [[TMP11:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !2
14133 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
14134 // CHECK13-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[TMP12]] to i64
14135 // CHECK13-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds double, double* [[TMP11]], i64 [[IDXPROM5]]
14136 // CHECK13-NEXT:    [[TMP13:%.*]] = load double, double* [[ARRAYIDX6]], align 8, !llvm.access.group !2
14137 // CHECK13-NEXT:    [[ADD7:%.*]] = fadd double [[TMP10]], [[TMP13]]
14138 // CHECK13-NEXT:    [[TMP14:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !2
14139 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2
14140 // CHECK13-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP15]] to i64
14141 // CHECK13-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, double* [[TMP14]], i64 [[IDXPROM8]]
14142 // CHECK13-NEXT:    store double [[ADD7]], double* [[ARRAYIDX9]], align 8, !llvm.access.group !2
14143 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14144 // CHECK13:       omp.body.continue:
14145 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14146 // CHECK13:       omp.inner.for.inc:
14147 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14148 // CHECK13-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
14149 // CHECK13-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
14150 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
14151 // CHECK13:       omp.inner.for.end:
14152 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14153 // CHECK13-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP17]], 0
14154 // CHECK13-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
14155 // CHECK13-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
14156 // CHECK13-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
14157 // CHECK13-NEXT:    store i32 [[ADD14]], i32* [[I3]], align 4
14158 // CHECK13-NEXT:    br label [[SIMD_IF_END]]
14159 // CHECK13:       simd.if.end:
14160 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
14161 // CHECK13-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_16]], align 4
14162 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
14163 // CHECK13-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[TMP19]], 0
14164 // CHECK13-NEXT:    [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
14165 // CHECK13-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[DIV19]], 1
14166 // CHECK13-NEXT:    store i32 [[SUB20]], i32* [[DOTCAPTURE_EXPR_17]], align 4
14167 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
14168 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
14169 // CHECK13-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB22]], align 4
14170 // CHECK13-NEXT:    store i32 0, i32* [[I23]], align 4
14171 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
14172 // CHECK13-NEXT:    [[CMP24:%.*]] = icmp slt i32 0, [[TMP21]]
14173 // CHECK13-NEXT:    br i1 [[CMP24]], label [[SIMD_IF_THEN25:%.*]], label [[SIMD_IF_END48:%.*]]
14174 // CHECK13:       simd.if.then25:
14175 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
14176 // CHECK13-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV26]], align 4
14177 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND28:%.*]]
14178 // CHECK13:       omp.inner.for.cond28:
14179 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
14180 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !6
14181 // CHECK13-NEXT:    [[CMP29:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
14182 // CHECK13-NEXT:    br i1 [[CMP29]], label [[OMP_INNER_FOR_BODY30:%.*]], label [[OMP_INNER_FOR_END43:%.*]]
14183 // CHECK13:       omp.inner.for.body30:
14184 // CHECK13-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
14185 // CHECK13-NEXT:    [[MUL31:%.*]] = mul nsw i32 [[TMP25]], 1
14186 // CHECK13-NEXT:    [[ADD32:%.*]] = add nsw i32 0, [[MUL31]]
14187 // CHECK13-NEXT:    store i32 [[ADD32]], i32* [[I27]], align 4, !llvm.access.group !6
14188 // CHECK13-NEXT:    [[TMP26:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !6
14189 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
14190 // CHECK13-NEXT:    [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
14191 // CHECK13-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds double, double* [[TMP26]], i64 [[IDXPROM33]]
14192 // CHECK13-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX34]], align 8, !llvm.access.group !6
14193 // CHECK13-NEXT:    [[TMP29:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !6
14194 // CHECK13-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
14195 // CHECK13-NEXT:    [[IDXPROM35:%.*]] = sext i32 [[TMP30]] to i64
14196 // CHECK13-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds double, double* [[TMP29]], i64 [[IDXPROM35]]
14197 // CHECK13-NEXT:    [[TMP31:%.*]] = load double, double* [[ARRAYIDX36]], align 8, !llvm.access.group !6
14198 // CHECK13-NEXT:    [[ADD37:%.*]] = fadd double [[TMP28]], [[TMP31]]
14199 // CHECK13-NEXT:    [[TMP32:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !6
14200 // CHECK13-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !6
14201 // CHECK13-NEXT:    [[IDXPROM38:%.*]] = sext i32 [[TMP33]] to i64
14202 // CHECK13-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds double, double* [[TMP32]], i64 [[IDXPROM38]]
14203 // CHECK13-NEXT:    store double [[ADD37]], double* [[ARRAYIDX39]], align 8, !llvm.access.group !6
14204 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE40:%.*]]
14205 // CHECK13:       omp.body.continue40:
14206 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC41:%.*]]
14207 // CHECK13:       omp.inner.for.inc41:
14208 // CHECK13-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
14209 // CHECK13-NEXT:    [[ADD42:%.*]] = add nsw i32 [[TMP34]], 1
14210 // CHECK13-NEXT:    store i32 [[ADD42]], i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !6
14211 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND28]], !llvm.loop [[LOOP7:![0-9]+]]
14212 // CHECK13:       omp.inner.for.end43:
14213 // CHECK13-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
14214 // CHECK13-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[TMP35]], 0
14215 // CHECK13-NEXT:    [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
14216 // CHECK13-NEXT:    [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1
14217 // CHECK13-NEXT:    [[ADD47:%.*]] = add nsw i32 0, [[MUL46]]
14218 // CHECK13-NEXT:    store i32 [[ADD47]], i32* [[I27]], align 4
14219 // CHECK13-NEXT:    br label [[SIMD_IF_END48]]
14220 // CHECK13:       simd.if.end48:
14221 // CHECK13-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
14222 // CHECK13-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_50]], align 4
14223 // CHECK13-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
14224 // CHECK13-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[TMP37]], 0
14225 // CHECK13-NEXT:    [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1
14226 // CHECK13-NEXT:    [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1
14227 // CHECK13-NEXT:    store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4
14228 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
14229 // CHECK13-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4
14230 // CHECK13-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB56]], align 4
14231 // CHECK13-NEXT:    store i32 0, i32* [[I57]], align 4
14232 // CHECK13-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
14233 // CHECK13-NEXT:    [[CMP58:%.*]] = icmp slt i32 0, [[TMP39]]
14234 // CHECK13-NEXT:    br i1 [[CMP58]], label [[SIMD_IF_THEN59:%.*]], label [[SIMD_IF_END82:%.*]]
14235 // CHECK13:       simd.if.then59:
14236 // CHECK13-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
14237 // CHECK13-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV60]], align 4
14238 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND62:%.*]]
14239 // CHECK13:       omp.inner.for.cond62:
14240 // CHECK13-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
14241 // CHECK13-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !9
14242 // CHECK13-NEXT:    [[CMP63:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
14243 // CHECK13-NEXT:    br i1 [[CMP63]], label [[OMP_INNER_FOR_BODY64:%.*]], label [[OMP_INNER_FOR_END77:%.*]]
14244 // CHECK13:       omp.inner.for.body64:
14245 // CHECK13-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
14246 // CHECK13-NEXT:    [[MUL65:%.*]] = mul nsw i32 [[TMP43]], 1
14247 // CHECK13-NEXT:    [[ADD66:%.*]] = add nsw i32 0, [[MUL65]]
14248 // CHECK13-NEXT:    store i32 [[ADD66]], i32* [[I61]], align 4, !llvm.access.group !9
14249 // CHECK13-NEXT:    [[TMP44:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !9
14250 // CHECK13-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
14251 // CHECK13-NEXT:    [[IDXPROM67:%.*]] = sext i32 [[TMP45]] to i64
14252 // CHECK13-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds double, double* [[TMP44]], i64 [[IDXPROM67]]
14253 // CHECK13-NEXT:    [[TMP46:%.*]] = load double, double* [[ARRAYIDX68]], align 8, !llvm.access.group !9
14254 // CHECK13-NEXT:    [[TMP47:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !9
14255 // CHECK13-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
14256 // CHECK13-NEXT:    [[IDXPROM69:%.*]] = sext i32 [[TMP48]] to i64
14257 // CHECK13-NEXT:    [[ARRAYIDX70:%.*]] = getelementptr inbounds double, double* [[TMP47]], i64 [[IDXPROM69]]
14258 // CHECK13-NEXT:    [[TMP49:%.*]] = load double, double* [[ARRAYIDX70]], align 8, !llvm.access.group !9
14259 // CHECK13-NEXT:    [[ADD71:%.*]] = fadd double [[TMP46]], [[TMP49]]
14260 // CHECK13-NEXT:    [[TMP50:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !9
14261 // CHECK13-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !9
14262 // CHECK13-NEXT:    [[IDXPROM72:%.*]] = sext i32 [[TMP51]] to i64
14263 // CHECK13-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds double, double* [[TMP50]], i64 [[IDXPROM72]]
14264 // CHECK13-NEXT:    store double [[ADD71]], double* [[ARRAYIDX73]], align 8, !llvm.access.group !9
14265 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE74:%.*]]
14266 // CHECK13:       omp.body.continue74:
14267 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC75:%.*]]
14268 // CHECK13:       omp.inner.for.inc75:
14269 // CHECK13-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
14270 // CHECK13-NEXT:    [[ADD76:%.*]] = add nsw i32 [[TMP52]], 1
14271 // CHECK13-NEXT:    store i32 [[ADD76]], i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !9
14272 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND62]], !llvm.loop [[LOOP10:![0-9]+]]
14273 // CHECK13:       omp.inner.for.end77:
14274 // CHECK13-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
14275 // CHECK13-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP53]], 0
14276 // CHECK13-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
14277 // CHECK13-NEXT:    [[MUL80:%.*]] = mul nsw i32 [[DIV79]], 1
14278 // CHECK13-NEXT:    [[ADD81:%.*]] = add nsw i32 0, [[MUL80]]
14279 // CHECK13-NEXT:    store i32 [[ADD81]], i32* [[I61]], align 4
14280 // CHECK13-NEXT:    br label [[SIMD_IF_END82]]
14281 // CHECK13:       simd.if.end82:
14282 // CHECK13-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
14283 // CHECK13-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_84]], align 4
14284 // CHECK13-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
14285 // CHECK13-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP55]], 0
14286 // CHECK13-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
14287 // CHECK13-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
14288 // CHECK13-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
14289 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB89]], align 4
14290 // CHECK13-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
14291 // CHECK13-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB90]], align 4
14292 // CHECK13-NEXT:    store i32 0, i32* [[I91]], align 4
14293 // CHECK13-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
14294 // CHECK13-NEXT:    [[CMP92:%.*]] = icmp slt i32 0, [[TMP57]]
14295 // CHECK13-NEXT:    br i1 [[CMP92]], label [[SIMD_IF_THEN93:%.*]], label [[SIMD_IF_END116:%.*]]
14296 // CHECK13:       simd.if.then93:
14297 // CHECK13-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB89]], align 4
14298 // CHECK13-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV94]], align 4
14299 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND96:%.*]]
14300 // CHECK13:       omp.inner.for.cond96:
14301 // CHECK13-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
14302 // CHECK13-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB90]], align 4, !llvm.access.group !12
14303 // CHECK13-NEXT:    [[CMP97:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
14304 // CHECK13-NEXT:    br i1 [[CMP97]], label [[OMP_INNER_FOR_BODY98:%.*]], label [[OMP_INNER_FOR_END111:%.*]]
14305 // CHECK13:       omp.inner.for.body98:
14306 // CHECK13-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
14307 // CHECK13-NEXT:    [[MUL99:%.*]] = mul nsw i32 [[TMP61]], 1
14308 // CHECK13-NEXT:    [[ADD100:%.*]] = add nsw i32 0, [[MUL99]]
14309 // CHECK13-NEXT:    store i32 [[ADD100]], i32* [[I95]], align 4, !llvm.access.group !12
14310 // CHECK13-NEXT:    [[TMP62:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !12
14311 // CHECK13-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
14312 // CHECK13-NEXT:    [[IDXPROM101:%.*]] = sext i32 [[TMP63]] to i64
14313 // CHECK13-NEXT:    [[ARRAYIDX102:%.*]] = getelementptr inbounds double, double* [[TMP62]], i64 [[IDXPROM101]]
14314 // CHECK13-NEXT:    [[TMP64:%.*]] = load double, double* [[ARRAYIDX102]], align 8, !llvm.access.group !12
14315 // CHECK13-NEXT:    [[TMP65:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !12
14316 // CHECK13-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
14317 // CHECK13-NEXT:    [[IDXPROM103:%.*]] = sext i32 [[TMP66]] to i64
14318 // CHECK13-NEXT:    [[ARRAYIDX104:%.*]] = getelementptr inbounds double, double* [[TMP65]], i64 [[IDXPROM103]]
14319 // CHECK13-NEXT:    [[TMP67:%.*]] = load double, double* [[ARRAYIDX104]], align 8, !llvm.access.group !12
14320 // CHECK13-NEXT:    [[ADD105:%.*]] = fadd double [[TMP64]], [[TMP67]]
14321 // CHECK13-NEXT:    [[TMP68:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !12
14322 // CHECK13-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !12
14323 // CHECK13-NEXT:    [[IDXPROM106:%.*]] = sext i32 [[TMP69]] to i64
14324 // CHECK13-NEXT:    [[ARRAYIDX107:%.*]] = getelementptr inbounds double, double* [[TMP68]], i64 [[IDXPROM106]]
14325 // CHECK13-NEXT:    store double [[ADD105]], double* [[ARRAYIDX107]], align 8, !llvm.access.group !12
14326 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE108:%.*]]
14327 // CHECK13:       omp.body.continue108:
14328 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC109:%.*]]
14329 // CHECK13:       omp.inner.for.inc109:
14330 // CHECK13-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
14331 // CHECK13-NEXT:    [[ADD110:%.*]] = add nsw i32 [[TMP70]], 1
14332 // CHECK13-NEXT:    store i32 [[ADD110]], i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !12
14333 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND96]], !llvm.loop [[LOOP13:![0-9]+]]
14334 // CHECK13:       omp.inner.for.end111:
14335 // CHECK13-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
14336 // CHECK13-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[TMP71]], 0
14337 // CHECK13-NEXT:    [[DIV113:%.*]] = sdiv i32 [[SUB112]], 1
14338 // CHECK13-NEXT:    [[MUL114:%.*]] = mul nsw i32 [[DIV113]], 1
14339 // CHECK13-NEXT:    [[ADD115:%.*]] = add nsw i32 0, [[MUL114]]
14340 // CHECK13-NEXT:    store i32 [[ADD115]], i32* [[I95]], align 4
14341 // CHECK13-NEXT:    br label [[SIMD_IF_END116]]
14342 // CHECK13:       simd.if.end116:
14343 // CHECK13-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
14344 // CHECK13-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_117]], align 4
14345 // CHECK13-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
14346 // CHECK13-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_119]], align 4
14347 // CHECK13-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
14348 // CHECK13-NEXT:    [[SUB121:%.*]] = sub nsw i32 [[TMP74]], 0
14349 // CHECK13-NEXT:    [[DIV122:%.*]] = sdiv i32 [[SUB121]], 1
14350 // CHECK13-NEXT:    [[SUB123:%.*]] = sub nsw i32 [[DIV122]], 1
14351 // CHECK13-NEXT:    store i32 [[SUB123]], i32* [[DOTCAPTURE_EXPR_120]], align 4
14352 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB124]], align 4
14353 // CHECK13-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_120]], align 4
14354 // CHECK13-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB125]], align 4
14355 // CHECK13-NEXT:    store i32 0, i32* [[I126]], align 4
14356 // CHECK13-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
14357 // CHECK13-NEXT:    [[CMP127:%.*]] = icmp slt i32 0, [[TMP76]]
14358 // CHECK13-NEXT:    br i1 [[CMP127]], label [[SIMD_IF_THEN128:%.*]], label [[SIMD_IF_END151:%.*]]
14359 // CHECK13:       simd.if.then128:
14360 // CHECK13-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB124]], align 4
14361 // CHECK13-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV129]], align 4
14362 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND131:%.*]]
14363 // CHECK13:       omp.inner.for.cond131:
14364 // CHECK13-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
14365 // CHECK13-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB125]], align 4, !llvm.access.group !15
14366 // CHECK13-NEXT:    [[CMP132:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
14367 // CHECK13-NEXT:    br i1 [[CMP132]], label [[OMP_INNER_FOR_BODY133:%.*]], label [[OMP_INNER_FOR_END146:%.*]]
14368 // CHECK13:       omp.inner.for.body133:
14369 // CHECK13-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
14370 // CHECK13-NEXT:    [[MUL134:%.*]] = mul nsw i32 [[TMP80]], 1
14371 // CHECK13-NEXT:    [[ADD135:%.*]] = add nsw i32 0, [[MUL134]]
14372 // CHECK13-NEXT:    store i32 [[ADD135]], i32* [[I130]], align 4, !llvm.access.group !15
14373 // CHECK13-NEXT:    [[TMP81:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !15
14374 // CHECK13-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
14375 // CHECK13-NEXT:    [[IDXPROM136:%.*]] = sext i32 [[TMP82]] to i64
14376 // CHECK13-NEXT:    [[ARRAYIDX137:%.*]] = getelementptr inbounds double, double* [[TMP81]], i64 [[IDXPROM136]]
14377 // CHECK13-NEXT:    [[TMP83:%.*]] = load double, double* [[ARRAYIDX137]], align 8, !llvm.access.group !15
14378 // CHECK13-NEXT:    [[TMP84:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !15
14379 // CHECK13-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
14380 // CHECK13-NEXT:    [[IDXPROM138:%.*]] = sext i32 [[TMP85]] to i64
14381 // CHECK13-NEXT:    [[ARRAYIDX139:%.*]] = getelementptr inbounds double, double* [[TMP84]], i64 [[IDXPROM138]]
14382 // CHECK13-NEXT:    [[TMP86:%.*]] = load double, double* [[ARRAYIDX139]], align 8, !llvm.access.group !15
14383 // CHECK13-NEXT:    [[ADD140:%.*]] = fadd double [[TMP83]], [[TMP86]]
14384 // CHECK13-NEXT:    [[TMP87:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !15
14385 // CHECK13-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !15
14386 // CHECK13-NEXT:    [[IDXPROM141:%.*]] = sext i32 [[TMP88]] to i64
14387 // CHECK13-NEXT:    [[ARRAYIDX142:%.*]] = getelementptr inbounds double, double* [[TMP87]], i64 [[IDXPROM141]]
14388 // CHECK13-NEXT:    store double [[ADD140]], double* [[ARRAYIDX142]], align 8, !llvm.access.group !15
14389 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE143:%.*]]
14390 // CHECK13:       omp.body.continue143:
14391 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC144:%.*]]
14392 // CHECK13:       omp.inner.for.inc144:
14393 // CHECK13-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
14394 // CHECK13-NEXT:    [[ADD145:%.*]] = add nsw i32 [[TMP89]], 1
14395 // CHECK13-NEXT:    store i32 [[ADD145]], i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !15
14396 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND131]], !llvm.loop [[LOOP16:![0-9]+]]
14397 // CHECK13:       omp.inner.for.end146:
14398 // CHECK13-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
14399 // CHECK13-NEXT:    [[SUB147:%.*]] = sub nsw i32 [[TMP90]], 0
14400 // CHECK13-NEXT:    [[DIV148:%.*]] = sdiv i32 [[SUB147]], 1
14401 // CHECK13-NEXT:    [[MUL149:%.*]] = mul nsw i32 [[DIV148]], 1
14402 // CHECK13-NEXT:    [[ADD150:%.*]] = add nsw i32 0, [[MUL149]]
14403 // CHECK13-NEXT:    store i32 [[ADD150]], i32* [[I130]], align 4
14404 // CHECK13-NEXT:    br label [[SIMD_IF_END151]]
14405 // CHECK13:       simd.if.end151:
14406 // CHECK13-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
14407 // CHECK13-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_153]], align 4
14408 // CHECK13-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
14409 // CHECK13-NEXT:    [[SUB155:%.*]] = sub nsw i32 [[TMP92]], 0
14410 // CHECK13-NEXT:    [[DIV156:%.*]] = sdiv i32 [[SUB155]], 1
14411 // CHECK13-NEXT:    [[SUB157:%.*]] = sub nsw i32 [[DIV156]], 1
14412 // CHECK13-NEXT:    store i32 [[SUB157]], i32* [[DOTCAPTURE_EXPR_154]], align 4
14413 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB158]], align 4
14414 // CHECK13-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_154]], align 4
14415 // CHECK13-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB159]], align 4
14416 // CHECK13-NEXT:    store i32 0, i32* [[I160]], align 4
14417 // CHECK13-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
14418 // CHECK13-NEXT:    [[CMP161:%.*]] = icmp slt i32 0, [[TMP94]]
14419 // CHECK13-NEXT:    br i1 [[CMP161]], label [[SIMD_IF_THEN162:%.*]], label [[SIMD_IF_END185:%.*]]
14420 // CHECK13:       simd.if.then162:
14421 // CHECK13-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB158]], align 4
14422 // CHECK13-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV163]], align 4
14423 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND165:%.*]]
14424 // CHECK13:       omp.inner.for.cond165:
14425 // CHECK13-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
14426 // CHECK13-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB159]], align 4, !llvm.access.group !18
14427 // CHECK13-NEXT:    [[CMP166:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
14428 // CHECK13-NEXT:    br i1 [[CMP166]], label [[OMP_INNER_FOR_BODY167:%.*]], label [[OMP_INNER_FOR_END180:%.*]]
14429 // CHECK13:       omp.inner.for.body167:
14430 // CHECK13-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
14431 // CHECK13-NEXT:    [[MUL168:%.*]] = mul nsw i32 [[TMP98]], 1
14432 // CHECK13-NEXT:    [[ADD169:%.*]] = add nsw i32 0, [[MUL168]]
14433 // CHECK13-NEXT:    store i32 [[ADD169]], i32* [[I164]], align 4, !llvm.access.group !18
14434 // CHECK13-NEXT:    [[TMP99:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !18
14435 // CHECK13-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
14436 // CHECK13-NEXT:    [[IDXPROM170:%.*]] = sext i32 [[TMP100]] to i64
14437 // CHECK13-NEXT:    [[ARRAYIDX171:%.*]] = getelementptr inbounds double, double* [[TMP99]], i64 [[IDXPROM170]]
14438 // CHECK13-NEXT:    [[TMP101:%.*]] = load double, double* [[ARRAYIDX171]], align 8, !llvm.access.group !18
14439 // CHECK13-NEXT:    [[TMP102:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !18
14440 // CHECK13-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
14441 // CHECK13-NEXT:    [[IDXPROM172:%.*]] = sext i32 [[TMP103]] to i64
14442 // CHECK13-NEXT:    [[ARRAYIDX173:%.*]] = getelementptr inbounds double, double* [[TMP102]], i64 [[IDXPROM172]]
14443 // CHECK13-NEXT:    [[TMP104:%.*]] = load double, double* [[ARRAYIDX173]], align 8, !llvm.access.group !18
14444 // CHECK13-NEXT:    [[ADD174:%.*]] = fadd double [[TMP101]], [[TMP104]]
14445 // CHECK13-NEXT:    [[TMP105:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !18
14446 // CHECK13-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !18
14447 // CHECK13-NEXT:    [[IDXPROM175:%.*]] = sext i32 [[TMP106]] to i64
14448 // CHECK13-NEXT:    [[ARRAYIDX176:%.*]] = getelementptr inbounds double, double* [[TMP105]], i64 [[IDXPROM175]]
14449 // CHECK13-NEXT:    store double [[ADD174]], double* [[ARRAYIDX176]], align 8, !llvm.access.group !18
14450 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE177:%.*]]
14451 // CHECK13:       omp.body.continue177:
14452 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC178:%.*]]
14453 // CHECK13:       omp.inner.for.inc178:
14454 // CHECK13-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
14455 // CHECK13-NEXT:    [[ADD179:%.*]] = add nsw i32 [[TMP107]], 1
14456 // CHECK13-NEXT:    store i32 [[ADD179]], i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !18
14457 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND165]], !llvm.loop [[LOOP19:![0-9]+]]
14458 // CHECK13:       omp.inner.for.end180:
14459 // CHECK13-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
14460 // CHECK13-NEXT:    [[SUB181:%.*]] = sub nsw i32 [[TMP108]], 0
14461 // CHECK13-NEXT:    [[DIV182:%.*]] = sdiv i32 [[SUB181]], 1
14462 // CHECK13-NEXT:    [[MUL183:%.*]] = mul nsw i32 [[DIV182]], 1
14463 // CHECK13-NEXT:    [[ADD184:%.*]] = add nsw i32 0, [[MUL183]]
14464 // CHECK13-NEXT:    store i32 [[ADD184]], i32* [[I164]], align 4
14465 // CHECK13-NEXT:    br label [[SIMD_IF_END185]]
14466 // CHECK13:       simd.if.end185:
14467 // CHECK13-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
14468 // CHECK13-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_186]], align 4
14469 // CHECK13-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
14470 // CHECK13-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_188]], align 4
14471 // CHECK13-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
14472 // CHECK13-NEXT:    [[SUB190:%.*]] = sub nsw i32 [[TMP111]], 0
14473 // CHECK13-NEXT:    [[DIV191:%.*]] = sdiv i32 [[SUB190]], 1
14474 // CHECK13-NEXT:    [[SUB192:%.*]] = sub nsw i32 [[DIV191]], 1
14475 // CHECK13-NEXT:    store i32 [[SUB192]], i32* [[DOTCAPTURE_EXPR_189]], align 4
14476 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB193]], align 4
14477 // CHECK13-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_189]], align 4
14478 // CHECK13-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB194]], align 4
14479 // CHECK13-NEXT:    store i32 0, i32* [[I195]], align 4
14480 // CHECK13-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
14481 // CHECK13-NEXT:    [[CMP196:%.*]] = icmp slt i32 0, [[TMP113]]
14482 // CHECK13-NEXT:    br i1 [[CMP196]], label [[SIMD_IF_THEN197:%.*]], label [[SIMD_IF_END220:%.*]]
14483 // CHECK13:       simd.if.then197:
14484 // CHECK13-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB193]], align 4
14485 // CHECK13-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV198]], align 4
14486 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND200:%.*]]
14487 // CHECK13:       omp.inner.for.cond200:
14488 // CHECK13-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
14489 // CHECK13-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB194]], align 4, !llvm.access.group !21
14490 // CHECK13-NEXT:    [[CMP201:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
14491 // CHECK13-NEXT:    br i1 [[CMP201]], label [[OMP_INNER_FOR_BODY202:%.*]], label [[OMP_INNER_FOR_END215:%.*]]
14492 // CHECK13:       omp.inner.for.body202:
14493 // CHECK13-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
14494 // CHECK13-NEXT:    [[MUL203:%.*]] = mul nsw i32 [[TMP117]], 1
14495 // CHECK13-NEXT:    [[ADD204:%.*]] = add nsw i32 0, [[MUL203]]
14496 // CHECK13-NEXT:    store i32 [[ADD204]], i32* [[I199]], align 4, !llvm.access.group !21
14497 // CHECK13-NEXT:    [[TMP118:%.*]] = load double*, double** [[B]], align 8, !llvm.access.group !21
14498 // CHECK13-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
14499 // CHECK13-NEXT:    [[IDXPROM205:%.*]] = sext i32 [[TMP119]] to i64
14500 // CHECK13-NEXT:    [[ARRAYIDX206:%.*]] = getelementptr inbounds double, double* [[TMP118]], i64 [[IDXPROM205]]
14501 // CHECK13-NEXT:    [[TMP120:%.*]] = load double, double* [[ARRAYIDX206]], align 8, !llvm.access.group !21
14502 // CHECK13-NEXT:    [[TMP121:%.*]] = load double*, double** [[C]], align 8, !llvm.access.group !21
14503 // CHECK13-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
14504 // CHECK13-NEXT:    [[IDXPROM207:%.*]] = sext i32 [[TMP122]] to i64
14505 // CHECK13-NEXT:    [[ARRAYIDX208:%.*]] = getelementptr inbounds double, double* [[TMP121]], i64 [[IDXPROM207]]
14506 // CHECK13-NEXT:    [[TMP123:%.*]] = load double, double* [[ARRAYIDX208]], align 8, !llvm.access.group !21
14507 // CHECK13-NEXT:    [[ADD209:%.*]] = fadd double [[TMP120]], [[TMP123]]
14508 // CHECK13-NEXT:    [[TMP124:%.*]] = load double*, double** [[A]], align 8, !llvm.access.group !21
14509 // CHECK13-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !21
14510 // CHECK13-NEXT:    [[IDXPROM210:%.*]] = sext i32 [[TMP125]] to i64
14511 // CHECK13-NEXT:    [[ARRAYIDX211:%.*]] = getelementptr inbounds double, double* [[TMP124]], i64 [[IDXPROM210]]
14512 // CHECK13-NEXT:    store double [[ADD209]], double* [[ARRAYIDX211]], align 8, !llvm.access.group !21
14513 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE212:%.*]]
14514 // CHECK13:       omp.body.continue212:
14515 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC213:%.*]]
14516 // CHECK13:       omp.inner.for.inc213:
14517 // CHECK13-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
14518 // CHECK13-NEXT:    [[ADD214:%.*]] = add nsw i32 [[TMP126]], 1
14519 // CHECK13-NEXT:    store i32 [[ADD214]], i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !21
14520 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND200]], !llvm.loop [[LOOP22:![0-9]+]]
14521 // CHECK13:       omp.inner.for.end215:
14522 // CHECK13-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
14523 // CHECK13-NEXT:    [[SUB216:%.*]] = sub nsw i32 [[TMP127]], 0
14524 // CHECK13-NEXT:    [[DIV217:%.*]] = sdiv i32 [[SUB216]], 1
14525 // CHECK13-NEXT:    [[MUL218:%.*]] = mul nsw i32 [[DIV217]], 1
14526 // CHECK13-NEXT:    [[ADD219:%.*]] = add nsw i32 0, [[MUL218]]
14527 // CHECK13-NEXT:    store i32 [[ADD219]], i32* [[I199]], align 4
14528 // CHECK13-NEXT:    br label [[SIMD_IF_END220]]
14529 // CHECK13:       simd.if.end220:
14530 // CHECK13-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v()
14531 // CHECK13-NEXT:    ret i32 [[CALL]]
14532 //
14533 //
14534 // CHECK13-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
14535 // CHECK13-SAME: () #[[ATTR1:[0-9]+]] comdat {
14536 // CHECK13-NEXT:  entry:
14537 // CHECK13-NEXT:    [[A:%.*]] = alloca i32*, align 8
14538 // CHECK13-NEXT:    [[B:%.*]] = alloca i32*, align 8
14539 // CHECK13-NEXT:    [[C:%.*]] = alloca i32*, align 8
14540 // CHECK13-NEXT:    [[N:%.*]] = alloca i32, align 4
14541 // CHECK13-NEXT:    [[CH:%.*]] = alloca i32, align 4
14542 // CHECK13-NEXT:    [[TMP:%.*]] = alloca i32, align 4
14543 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14544 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14545 // CHECK13-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
14546 // CHECK13-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
14547 // CHECK13-NEXT:    [[I:%.*]] = alloca i32, align 4
14548 // CHECK13-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
14549 // CHECK13-NEXT:    [[I3:%.*]] = alloca i32, align 4
14550 // CHECK13-NEXT:    [[_TMP15:%.*]] = alloca i32, align 4
14551 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4
14552 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
14553 // CHECK13-NEXT:    [[DOTOMP_LB21:%.*]] = alloca i32, align 4
14554 // CHECK13-NEXT:    [[DOTOMP_UB22:%.*]] = alloca i32, align 4
14555 // CHECK13-NEXT:    [[I23:%.*]] = alloca i32, align 4
14556 // CHECK13-NEXT:    [[DOTOMP_IV26:%.*]] = alloca i32, align 4
14557 // CHECK13-NEXT:    [[I27:%.*]] = alloca i32, align 4
14558 // CHECK13-NEXT:    [[_TMP49:%.*]] = alloca i32, align 4
14559 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
14560 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4
14561 // CHECK13-NEXT:    [[DOTOMP_LB55:%.*]] = alloca i32, align 4
14562 // CHECK13-NEXT:    [[DOTOMP_UB56:%.*]] = alloca i32, align 4
14563 // CHECK13-NEXT:    [[I57:%.*]] = alloca i32, align 4
14564 // CHECK13-NEXT:    [[DOTOMP_IV60:%.*]] = alloca i32, align 4
14565 // CHECK13-NEXT:    [[I61:%.*]] = alloca i32, align 4
14566 // CHECK13-NEXT:    [[_TMP83:%.*]] = alloca i32, align 4
14567 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_84:%.*]] = alloca i32, align 4
14568 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_85:%.*]] = alloca i32, align 4
14569 // CHECK13-NEXT:    [[DOTOMP_LB89:%.*]] = alloca i32, align 4
14570 // CHECK13-NEXT:    [[DOTOMP_UB90:%.*]] = alloca i32, align 4
14571 // CHECK13-NEXT:    [[I91:%.*]] = alloca i32, align 4
14572 // CHECK13-NEXT:    [[DOTOMP_IV94:%.*]] = alloca i32, align 4
14573 // CHECK13-NEXT:    [[I95:%.*]] = alloca i32, align 4
14574 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_117:%.*]] = alloca i32, align 4
14575 // CHECK13-NEXT:    [[_TMP118:%.*]] = alloca i32, align 4
14576 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_119:%.*]] = alloca i32, align 4
14577 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_120:%.*]] = alloca i32, align 4
14578 // CHECK13-NEXT:    [[DOTOMP_LB124:%.*]] = alloca i32, align 4
14579 // CHECK13-NEXT:    [[DOTOMP_UB125:%.*]] = alloca i32, align 4
14580 // CHECK13-NEXT:    [[I126:%.*]] = alloca i32, align 4
14581 // CHECK13-NEXT:    [[DOTOMP_IV129:%.*]] = alloca i32, align 4
14582 // CHECK13-NEXT:    [[I130:%.*]] = alloca i32, align 4
14583 // CHECK13-NEXT:    [[_TMP152:%.*]] = alloca i32, align 4
14584 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_153:%.*]] = alloca i32, align 4
14585 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_154:%.*]] = alloca i32, align 4
14586 // CHECK13-NEXT:    [[DOTOMP_LB158:%.*]] = alloca i32, align 4
14587 // CHECK13-NEXT:    [[DOTOMP_UB159:%.*]] = alloca i32, align 4
14588 // CHECK13-NEXT:    [[I160:%.*]] = alloca i32, align 4
14589 // CHECK13-NEXT:    [[DOTOMP_IV163:%.*]] = alloca i32, align 4
14590 // CHECK13-NEXT:    [[I164:%.*]] = alloca i32, align 4
14591 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_186:%.*]] = alloca i32, align 4
14592 // CHECK13-NEXT:    [[_TMP187:%.*]] = alloca i32, align 4
14593 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_188:%.*]] = alloca i32, align 4
14594 // CHECK13-NEXT:    [[DOTCAPTURE_EXPR_189:%.*]] = alloca i32, align 4
14595 // CHECK13-NEXT:    [[DOTOMP_LB193:%.*]] = alloca i32, align 4
14596 // CHECK13-NEXT:    [[DOTOMP_UB194:%.*]] = alloca i32, align 4
14597 // CHECK13-NEXT:    [[I195:%.*]] = alloca i32, align 4
14598 // CHECK13-NEXT:    [[DOTOMP_IV198:%.*]] = alloca i32, align 4
14599 // CHECK13-NEXT:    [[I199:%.*]] = alloca i32, align 4
14600 // CHECK13-NEXT:    store i32 10000, i32* [[N]], align 4
14601 // CHECK13-NEXT:    store i32 100, i32* [[CH]], align 4
14602 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
14603 // CHECK13-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
14604 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14605 // CHECK13-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
14606 // CHECK13-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14607 // CHECK13-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14608 // CHECK13-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
14609 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
14610 // CHECK13-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
14611 // CHECK13-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
14612 // CHECK13-NEXT:    store i32 0, i32* [[I]], align 4
14613 // CHECK13-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14614 // CHECK13-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
14615 // CHECK13-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
14616 // CHECK13:       simd.if.then:
14617 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
14618 // CHECK13-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
14619 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
14620 // CHECK13:       omp.inner.for.cond:
14621 // CHECK13-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
14622 // CHECK13-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
14623 // CHECK13-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
14624 // CHECK13-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14625 // CHECK13:       omp.inner.for.body:
14626 // CHECK13-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
14627 // CHECK13-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
14628 // CHECK13-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14629 // CHECK13-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !24
14630 // CHECK13-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !24
14631 // CHECK13-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
14632 // CHECK13-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
14633 // CHECK13-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i64 [[IDXPROM]]
14634 // CHECK13-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
14635 // CHECK13-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !24
14636 // CHECK13-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
14637 // CHECK13-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[TMP12]] to i64
14638 // CHECK13-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i64 [[IDXPROM5]]
14639 // CHECK13-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !24
14640 // CHECK13-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], [[TMP13]]
14641 // CHECK13-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !24
14642 // CHECK13-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !24
14643 // CHECK13-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[TMP15]] to i64
14644 // CHECK13-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP14]], i64 [[IDXPROM8]]
14645 // CHECK13-NEXT:    store i32 [[ADD7]], i32* [[ARRAYIDX9]], align 4, !llvm.access.group !24
14646 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
14647 // CHECK13:       omp.body.continue:
14648 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
14649 // CHECK13:       omp.inner.for.inc:
14650 // CHECK13-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
14651 // CHECK13-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
14652 // CHECK13-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
14653 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
14654 // CHECK13:       omp.inner.for.end:
14655 // CHECK13-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
14656 // CHECK13-NEXT:    [[SUB11:%.*]] = sub nsw i32 [[TMP17]], 0
14657 // CHECK13-NEXT:    [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
14658 // CHECK13-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1
14659 // CHECK13-NEXT:    [[ADD14:%.*]] = add nsw i32 0, [[MUL13]]
14660 // CHECK13-NEXT:    store i32 [[ADD14]], i32* [[I3]], align 4
14661 // CHECK13-NEXT:    br label [[SIMD_IF_END]]
14662 // CHECK13:       simd.if.end:
14663 // CHECK13-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
14664 // CHECK13-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_16]], align 4
14665 // CHECK13-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
14666 // CHECK13-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[TMP19]], 0
14667 // CHECK13-NEXT:    [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
14668 // CHECK13-NEXT:    [[SUB20:%.*]] = sub nsw i32 [[DIV19]], 1
14669 // CHECK13-NEXT:    store i32 [[SUB20]], i32* [[DOTCAPTURE_EXPR_17]], align 4
14670 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB21]], align 4
14671 // CHECK13-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
14672 // CHECK13-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB22]], align 4
14673 // CHECK13-NEXT:    store i32 0, i32* [[I23]], align 4
14674 // CHECK13-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
14675 // CHECK13-NEXT:    [[CMP24:%.*]] = icmp slt i32 0, [[TMP21]]
14676 // CHECK13-NEXT:    br i1 [[CMP24]], label [[SIMD_IF_THEN25:%.*]], label [[SIMD_IF_END48:%.*]]
14677 // CHECK13:       simd.if.then25:
14678 // CHECK13-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4
14679 // CHECK13-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV26]], align 4
14680 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND28:%.*]]
14681 // CHECK13:       omp.inner.for.cond28:
14682 // CHECK13-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
14683 // CHECK13-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !27
14684 // CHECK13-NEXT:    [[CMP29:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
14685 // CHECK13-NEXT:    br i1 [[CMP29]], label [[OMP_INNER_FOR_BODY30:%.*]], label [[OMP_INNER_FOR_END43:%.*]]
14686 // CHECK13:       omp.inner.for.body30:
14687 // CHECK13-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
14688 // CHECK13-NEXT:    [[MUL31:%.*]] = mul nsw i32 [[TMP25]], 1
14689 // CHECK13-NEXT:    [[ADD32:%.*]] = add nsw i32 0, [[MUL31]]
14690 // CHECK13-NEXT:    store i32 [[ADD32]], i32* [[I27]], align 4, !llvm.access.group !27
14691 // CHECK13-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !27
14692 // CHECK13-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
14693 // CHECK13-NEXT:    [[IDXPROM33:%.*]] = sext i32 [[TMP27]] to i64
14694 // CHECK13-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i64 [[IDXPROM33]]
14695 // CHECK13-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX34]], align 4, !llvm.access.group !27
14696 // CHECK13-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !27
14697 // CHECK13-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
14698 // CHECK13-NEXT:    [[IDXPROM35:%.*]] = sext i32 [[TMP30]] to i64
14699 // CHECK13-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i64 [[IDXPROM35]]
14700 // CHECK13-NEXT:    [[TMP31:%.*]] = load i32, i32* [[ARRAYIDX36]], align 4, !llvm.access.group !27
14701 // CHECK13-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP28]], [[TMP31]]
14702 // CHECK13-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !27
14703 // CHECK13-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I27]], align 4, !llvm.access.group !27
14704 // CHECK13-NEXT:    [[IDXPROM38:%.*]] = sext i32 [[TMP33]] to i64
14705 // CHECK13-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i64 [[IDXPROM38]]
14706 // CHECK13-NEXT:    store i32 [[ADD37]], i32* [[ARRAYIDX39]], align 4, !llvm.access.group !27
14707 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE40:%.*]]
14708 // CHECK13:       omp.body.continue40:
14709 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC41:%.*]]
14710 // CHECK13:       omp.inner.for.inc41:
14711 // CHECK13-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
14712 // CHECK13-NEXT:    [[ADD42:%.*]] = add nsw i32 [[TMP34]], 1
14713 // CHECK13-NEXT:    store i32 [[ADD42]], i32* [[DOTOMP_IV26]], align 4, !llvm.access.group !27
14714 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND28]], !llvm.loop [[LOOP28:![0-9]+]]
14715 // CHECK13:       omp.inner.for.end43:
14716 // CHECK13-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4
14717 // CHECK13-NEXT:    [[SUB44:%.*]] = sub nsw i32 [[TMP35]], 0
14718 // CHECK13-NEXT:    [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
14719 // CHECK13-NEXT:    [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1
14720 // CHECK13-NEXT:    [[ADD47:%.*]] = add nsw i32 0, [[MUL46]]
14721 // CHECK13-NEXT:    store i32 [[ADD47]], i32* [[I27]], align 4
14722 // CHECK13-NEXT:    br label [[SIMD_IF_END48]]
14723 // CHECK13:       simd.if.end48:
14724 // CHECK13-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
14725 // CHECK13-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_50]], align 4
14726 // CHECK13-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
14727 // CHECK13-NEXT:    [[SUB52:%.*]] = sub nsw i32 [[TMP37]], 0
14728 // CHECK13-NEXT:    [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1
14729 // CHECK13-NEXT:    [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1
14730 // CHECK13-NEXT:    store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4
14731 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB55]], align 4
14732 // CHECK13-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4
14733 // CHECK13-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB56]], align 4
14734 // CHECK13-NEXT:    store i32 0, i32* [[I57]], align 4
14735 // CHECK13-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
14736 // CHECK13-NEXT:    [[CMP58:%.*]] = icmp slt i32 0, [[TMP39]]
14737 // CHECK13-NEXT:    br i1 [[CMP58]], label [[SIMD_IF_THEN59:%.*]], label [[SIMD_IF_END82:%.*]]
14738 // CHECK13:       simd.if.then59:
14739 // CHECK13-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB55]], align 4
14740 // CHECK13-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV60]], align 4
14741 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND62:%.*]]
14742 // CHECK13:       omp.inner.for.cond62:
14743 // CHECK13-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
14744 // CHECK13-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB56]], align 4, !llvm.access.group !30
14745 // CHECK13-NEXT:    [[CMP63:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
14746 // CHECK13-NEXT:    br i1 [[CMP63]], label [[OMP_INNER_FOR_BODY64:%.*]], label [[OMP_INNER_FOR_END77:%.*]]
14747 // CHECK13:       omp.inner.for.body64:
14748 // CHECK13-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
14749 // CHECK13-NEXT:    [[MUL65:%.*]] = mul nsw i32 [[TMP43]], 1
14750 // CHECK13-NEXT:    [[ADD66:%.*]] = add nsw i32 0, [[MUL65]]
14751 // CHECK13-NEXT:    store i32 [[ADD66]], i32* [[I61]], align 4, !llvm.access.group !30
14752 // CHECK13-NEXT:    [[TMP44:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !30
14753 // CHECK13-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
14754 // CHECK13-NEXT:    [[IDXPROM67:%.*]] = sext i32 [[TMP45]] to i64
14755 // CHECK13-NEXT:    [[ARRAYIDX68:%.*]] = getelementptr inbounds i32, i32* [[TMP44]], i64 [[IDXPROM67]]
14756 // CHECK13-NEXT:    [[TMP46:%.*]] = load i32, i32* [[ARRAYIDX68]], align 4, !llvm.access.group !30
14757 // CHECK13-NEXT:    [[TMP47:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !30
14758 // CHECK13-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
14759 // CHECK13-NEXT:    [[IDXPROM69:%.*]] = sext i32 [[TMP48]] to i64
14760 // CHECK13-NEXT:    [[ARRAYIDX70:%.*]] = getelementptr inbounds i32, i32* [[TMP47]], i64 [[IDXPROM69]]
14761 // CHECK13-NEXT:    [[TMP49:%.*]] = load i32, i32* [[ARRAYIDX70]], align 4, !llvm.access.group !30
14762 // CHECK13-NEXT:    [[ADD71:%.*]] = add nsw i32 [[TMP46]], [[TMP49]]
14763 // CHECK13-NEXT:    [[TMP50:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !30
14764 // CHECK13-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I61]], align 4, !llvm.access.group !30
14765 // CHECK13-NEXT:    [[IDXPROM72:%.*]] = sext i32 [[TMP51]] to i64
14766 // CHECK13-NEXT:    [[ARRAYIDX73:%.*]] = getelementptr inbounds i32, i32* [[TMP50]], i64 [[IDXPROM72]]
14767 // CHECK13-NEXT:    store i32 [[ADD71]], i32* [[ARRAYIDX73]], align 4, !llvm.access.group !30
14768 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE74:%.*]]
14769 // CHECK13:       omp.body.continue74:
14770 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC75:%.*]]
14771 // CHECK13:       omp.inner.for.inc75:
14772 // CHECK13-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
14773 // CHECK13-NEXT:    [[ADD76:%.*]] = add nsw i32 [[TMP52]], 1
14774 // CHECK13-NEXT:    store i32 [[ADD76]], i32* [[DOTOMP_IV60]], align 4, !llvm.access.group !30
14775 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND62]], !llvm.loop [[LOOP31:![0-9]+]]
14776 // CHECK13:       omp.inner.for.end77:
14777 // CHECK13-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
14778 // CHECK13-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP53]], 0
14779 // CHECK13-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
14780 // CHECK13-NEXT:    [[MUL80:%.*]] = mul nsw i32 [[DIV79]], 1
14781 // CHECK13-NEXT:    [[ADD81:%.*]] = add nsw i32 0, [[MUL80]]
14782 // CHECK13-NEXT:    store i32 [[ADD81]], i32* [[I61]], align 4
14783 // CHECK13-NEXT:    br label [[SIMD_IF_END82]]
14784 // CHECK13:       simd.if.end82:
14785 // CHECK13-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
14786 // CHECK13-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_84]], align 4
14787 // CHECK13-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
14788 // CHECK13-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[TMP55]], 0
14789 // CHECK13-NEXT:    [[DIV87:%.*]] = sdiv i32 [[SUB86]], 1
14790 // CHECK13-NEXT:    [[SUB88:%.*]] = sub nsw i32 [[DIV87]], 1
14791 // CHECK13-NEXT:    store i32 [[SUB88]], i32* [[DOTCAPTURE_EXPR_85]], align 4
14792 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB89]], align 4
14793 // CHECK13-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_85]], align 4
14794 // CHECK13-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB90]], align 4
14795 // CHECK13-NEXT:    store i32 0, i32* [[I91]], align 4
14796 // CHECK13-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
14797 // CHECK13-NEXT:    [[CMP92:%.*]] = icmp slt i32 0, [[TMP57]]
14798 // CHECK13-NEXT:    br i1 [[CMP92]], label [[SIMD_IF_THEN93:%.*]], label [[SIMD_IF_END116:%.*]]
14799 // CHECK13:       simd.if.then93:
14800 // CHECK13-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB89]], align 4
14801 // CHECK13-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV94]], align 4
14802 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND96:%.*]]
14803 // CHECK13:       omp.inner.for.cond96:
14804 // CHECK13-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
14805 // CHECK13-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB90]], align 4, !llvm.access.group !33
14806 // CHECK13-NEXT:    [[CMP97:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
14807 // CHECK13-NEXT:    br i1 [[CMP97]], label [[OMP_INNER_FOR_BODY98:%.*]], label [[OMP_INNER_FOR_END111:%.*]]
14808 // CHECK13:       omp.inner.for.body98:
14809 // CHECK13-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
14810 // CHECK13-NEXT:    [[MUL99:%.*]] = mul nsw i32 [[TMP61]], 1
14811 // CHECK13-NEXT:    [[ADD100:%.*]] = add nsw i32 0, [[MUL99]]
14812 // CHECK13-NEXT:    store i32 [[ADD100]], i32* [[I95]], align 4, !llvm.access.group !33
14813 // CHECK13-NEXT:    [[TMP62:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !33
14814 // CHECK13-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
14815 // CHECK13-NEXT:    [[IDXPROM101:%.*]] = sext i32 [[TMP63]] to i64
14816 // CHECK13-NEXT:    [[ARRAYIDX102:%.*]] = getelementptr inbounds i32, i32* [[TMP62]], i64 [[IDXPROM101]]
14817 // CHECK13-NEXT:    [[TMP64:%.*]] = load i32, i32* [[ARRAYIDX102]], align 4, !llvm.access.group !33
14818 // CHECK13-NEXT:    [[TMP65:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !33
14819 // CHECK13-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
14820 // CHECK13-NEXT:    [[IDXPROM103:%.*]] = sext i32 [[TMP66]] to i64
14821 // CHECK13-NEXT:    [[ARRAYIDX104:%.*]] = getelementptr inbounds i32, i32* [[TMP65]], i64 [[IDXPROM103]]
14822 // CHECK13-NEXT:    [[TMP67:%.*]] = load i32, i32* [[ARRAYIDX104]], align 4, !llvm.access.group !33
14823 // CHECK13-NEXT:    [[ADD105:%.*]] = add nsw i32 [[TMP64]], [[TMP67]]
14824 // CHECK13-NEXT:    [[TMP68:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !33
14825 // CHECK13-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I95]], align 4, !llvm.access.group !33
14826 // CHECK13-NEXT:    [[IDXPROM106:%.*]] = sext i32 [[TMP69]] to i64
14827 // CHECK13-NEXT:    [[ARRAYIDX107:%.*]] = getelementptr inbounds i32, i32* [[TMP68]], i64 [[IDXPROM106]]
14828 // CHECK13-NEXT:    store i32 [[ADD105]], i32* [[ARRAYIDX107]], align 4, !llvm.access.group !33
14829 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE108:%.*]]
14830 // CHECK13:       omp.body.continue108:
14831 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC109:%.*]]
14832 // CHECK13:       omp.inner.for.inc109:
14833 // CHECK13-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
14834 // CHECK13-NEXT:    [[ADD110:%.*]] = add nsw i32 [[TMP70]], 1
14835 // CHECK13-NEXT:    store i32 [[ADD110]], i32* [[DOTOMP_IV94]], align 4, !llvm.access.group !33
14836 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND96]], !llvm.loop [[LOOP34:![0-9]+]]
14837 // CHECK13:       omp.inner.for.end111:
14838 // CHECK13-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_84]], align 4
14839 // CHECK13-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[TMP71]], 0
14840 // CHECK13-NEXT:    [[DIV113:%.*]] = sdiv i32 [[SUB112]], 1
14841 // CHECK13-NEXT:    [[MUL114:%.*]] = mul nsw i32 [[DIV113]], 1
14842 // CHECK13-NEXT:    [[ADD115:%.*]] = add nsw i32 0, [[MUL114]]
14843 // CHECK13-NEXT:    store i32 [[ADD115]], i32* [[I95]], align 4
14844 // CHECK13-NEXT:    br label [[SIMD_IF_END116]]
14845 // CHECK13:       simd.if.end116:
14846 // CHECK13-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
14847 // CHECK13-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_117]], align 4
14848 // CHECK13-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
14849 // CHECK13-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_119]], align 4
14850 // CHECK13-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
14851 // CHECK13-NEXT:    [[SUB121:%.*]] = sub nsw i32 [[TMP74]], 0
14852 // CHECK13-NEXT:    [[DIV122:%.*]] = sdiv i32 [[SUB121]], 1
14853 // CHECK13-NEXT:    [[SUB123:%.*]] = sub nsw i32 [[DIV122]], 1
14854 // CHECK13-NEXT:    store i32 [[SUB123]], i32* [[DOTCAPTURE_EXPR_120]], align 4
14855 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB124]], align 4
14856 // CHECK13-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_120]], align 4
14857 // CHECK13-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB125]], align 4
14858 // CHECK13-NEXT:    store i32 0, i32* [[I126]], align 4
14859 // CHECK13-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
14860 // CHECK13-NEXT:    [[CMP127:%.*]] = icmp slt i32 0, [[TMP76]]
14861 // CHECK13-NEXT:    br i1 [[CMP127]], label [[SIMD_IF_THEN128:%.*]], label [[SIMD_IF_END151:%.*]]
14862 // CHECK13:       simd.if.then128:
14863 // CHECK13-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB124]], align 4
14864 // CHECK13-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV129]], align 4
14865 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND131:%.*]]
14866 // CHECK13:       omp.inner.for.cond131:
14867 // CHECK13-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
14868 // CHECK13-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB125]], align 4, !llvm.access.group !36
14869 // CHECK13-NEXT:    [[CMP132:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
14870 // CHECK13-NEXT:    br i1 [[CMP132]], label [[OMP_INNER_FOR_BODY133:%.*]], label [[OMP_INNER_FOR_END146:%.*]]
14871 // CHECK13:       omp.inner.for.body133:
14872 // CHECK13-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
14873 // CHECK13-NEXT:    [[MUL134:%.*]] = mul nsw i32 [[TMP80]], 1
14874 // CHECK13-NEXT:    [[ADD135:%.*]] = add nsw i32 0, [[MUL134]]
14875 // CHECK13-NEXT:    store i32 [[ADD135]], i32* [[I130]], align 4, !llvm.access.group !36
14876 // CHECK13-NEXT:    [[TMP81:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !36
14877 // CHECK13-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
14878 // CHECK13-NEXT:    [[IDXPROM136:%.*]] = sext i32 [[TMP82]] to i64
14879 // CHECK13-NEXT:    [[ARRAYIDX137:%.*]] = getelementptr inbounds i32, i32* [[TMP81]], i64 [[IDXPROM136]]
14880 // CHECK13-NEXT:    [[TMP83:%.*]] = load i32, i32* [[ARRAYIDX137]], align 4, !llvm.access.group !36
14881 // CHECK13-NEXT:    [[TMP84:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !36
14882 // CHECK13-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
14883 // CHECK13-NEXT:    [[IDXPROM138:%.*]] = sext i32 [[TMP85]] to i64
14884 // CHECK13-NEXT:    [[ARRAYIDX139:%.*]] = getelementptr inbounds i32, i32* [[TMP84]], i64 [[IDXPROM138]]
14885 // CHECK13-NEXT:    [[TMP86:%.*]] = load i32, i32* [[ARRAYIDX139]], align 4, !llvm.access.group !36
14886 // CHECK13-NEXT:    [[ADD140:%.*]] = add nsw i32 [[TMP83]], [[TMP86]]
14887 // CHECK13-NEXT:    [[TMP87:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !36
14888 // CHECK13-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I130]], align 4, !llvm.access.group !36
14889 // CHECK13-NEXT:    [[IDXPROM141:%.*]] = sext i32 [[TMP88]] to i64
14890 // CHECK13-NEXT:    [[ARRAYIDX142:%.*]] = getelementptr inbounds i32, i32* [[TMP87]], i64 [[IDXPROM141]]
14891 // CHECK13-NEXT:    store i32 [[ADD140]], i32* [[ARRAYIDX142]], align 4, !llvm.access.group !36
14892 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE143:%.*]]
14893 // CHECK13:       omp.body.continue143:
14894 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC144:%.*]]
14895 // CHECK13:       omp.inner.for.inc144:
14896 // CHECK13-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
14897 // CHECK13-NEXT:    [[ADD145:%.*]] = add nsw i32 [[TMP89]], 1
14898 // CHECK13-NEXT:    store i32 [[ADD145]], i32* [[DOTOMP_IV129]], align 4, !llvm.access.group !36
14899 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND131]], !llvm.loop [[LOOP37:![0-9]+]]
14900 // CHECK13:       omp.inner.for.end146:
14901 // CHECK13-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_119]], align 4
14902 // CHECK13-NEXT:    [[SUB147:%.*]] = sub nsw i32 [[TMP90]], 0
14903 // CHECK13-NEXT:    [[DIV148:%.*]] = sdiv i32 [[SUB147]], 1
14904 // CHECK13-NEXT:    [[MUL149:%.*]] = mul nsw i32 [[DIV148]], 1
14905 // CHECK13-NEXT:    [[ADD150:%.*]] = add nsw i32 0, [[MUL149]]
14906 // CHECK13-NEXT:    store i32 [[ADD150]], i32* [[I130]], align 4
14907 // CHECK13-NEXT:    br label [[SIMD_IF_END151]]
14908 // CHECK13:       simd.if.end151:
14909 // CHECK13-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
14910 // CHECK13-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_153]], align 4
14911 // CHECK13-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
14912 // CHECK13-NEXT:    [[SUB155:%.*]] = sub nsw i32 [[TMP92]], 0
14913 // CHECK13-NEXT:    [[DIV156:%.*]] = sdiv i32 [[SUB155]], 1
14914 // CHECK13-NEXT:    [[SUB157:%.*]] = sub nsw i32 [[DIV156]], 1
14915 // CHECK13-NEXT:    store i32 [[SUB157]], i32* [[DOTCAPTURE_EXPR_154]], align 4
14916 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB158]], align 4
14917 // CHECK13-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_154]], align 4
14918 // CHECK13-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB159]], align 4
14919 // CHECK13-NEXT:    store i32 0, i32* [[I160]], align 4
14920 // CHECK13-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
14921 // CHECK13-NEXT:    [[CMP161:%.*]] = icmp slt i32 0, [[TMP94]]
14922 // CHECK13-NEXT:    br i1 [[CMP161]], label [[SIMD_IF_THEN162:%.*]], label [[SIMD_IF_END185:%.*]]
14923 // CHECK13:       simd.if.then162:
14924 // CHECK13-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB158]], align 4
14925 // CHECK13-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV163]], align 4
14926 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND165:%.*]]
14927 // CHECK13:       omp.inner.for.cond165:
14928 // CHECK13-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
14929 // CHECK13-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB159]], align 4, !llvm.access.group !39
14930 // CHECK13-NEXT:    [[CMP166:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
14931 // CHECK13-NEXT:    br i1 [[CMP166]], label [[OMP_INNER_FOR_BODY167:%.*]], label [[OMP_INNER_FOR_END180:%.*]]
14932 // CHECK13:       omp.inner.for.body167:
14933 // CHECK13-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
14934 // CHECK13-NEXT:    [[MUL168:%.*]] = mul nsw i32 [[TMP98]], 1
14935 // CHECK13-NEXT:    [[ADD169:%.*]] = add nsw i32 0, [[MUL168]]
14936 // CHECK13-NEXT:    store i32 [[ADD169]], i32* [[I164]], align 4, !llvm.access.group !39
14937 // CHECK13-NEXT:    [[TMP99:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !39
14938 // CHECK13-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
14939 // CHECK13-NEXT:    [[IDXPROM170:%.*]] = sext i32 [[TMP100]] to i64
14940 // CHECK13-NEXT:    [[ARRAYIDX171:%.*]] = getelementptr inbounds i32, i32* [[TMP99]], i64 [[IDXPROM170]]
14941 // CHECK13-NEXT:    [[TMP101:%.*]] = load i32, i32* [[ARRAYIDX171]], align 4, !llvm.access.group !39
14942 // CHECK13-NEXT:    [[TMP102:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !39
14943 // CHECK13-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
14944 // CHECK13-NEXT:    [[IDXPROM172:%.*]] = sext i32 [[TMP103]] to i64
14945 // CHECK13-NEXT:    [[ARRAYIDX173:%.*]] = getelementptr inbounds i32, i32* [[TMP102]], i64 [[IDXPROM172]]
14946 // CHECK13-NEXT:    [[TMP104:%.*]] = load i32, i32* [[ARRAYIDX173]], align 4, !llvm.access.group !39
14947 // CHECK13-NEXT:    [[ADD174:%.*]] = add nsw i32 [[TMP101]], [[TMP104]]
14948 // CHECK13-NEXT:    [[TMP105:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !39
14949 // CHECK13-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I164]], align 4, !llvm.access.group !39
14950 // CHECK13-NEXT:    [[IDXPROM175:%.*]] = sext i32 [[TMP106]] to i64
14951 // CHECK13-NEXT:    [[ARRAYIDX176:%.*]] = getelementptr inbounds i32, i32* [[TMP105]], i64 [[IDXPROM175]]
14952 // CHECK13-NEXT:    store i32 [[ADD174]], i32* [[ARRAYIDX176]], align 4, !llvm.access.group !39
14953 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE177:%.*]]
14954 // CHECK13:       omp.body.continue177:
14955 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC178:%.*]]
14956 // CHECK13:       omp.inner.for.inc178:
14957 // CHECK13-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
14958 // CHECK13-NEXT:    [[ADD179:%.*]] = add nsw i32 [[TMP107]], 1
14959 // CHECK13-NEXT:    store i32 [[ADD179]], i32* [[DOTOMP_IV163]], align 4, !llvm.access.group !39
14960 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND165]], !llvm.loop [[LOOP40:![0-9]+]]
14961 // CHECK13:       omp.inner.for.end180:
14962 // CHECK13-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_153]], align 4
14963 // CHECK13-NEXT:    [[SUB181:%.*]] = sub nsw i32 [[TMP108]], 0
14964 // CHECK13-NEXT:    [[DIV182:%.*]] = sdiv i32 [[SUB181]], 1
14965 // CHECK13-NEXT:    [[MUL183:%.*]] = mul nsw i32 [[DIV182]], 1
14966 // CHECK13-NEXT:    [[ADD184:%.*]] = add nsw i32 0, [[MUL183]]
14967 // CHECK13-NEXT:    store i32 [[ADD184]], i32* [[I164]], align 4
14968 // CHECK13-NEXT:    br label [[SIMD_IF_END185]]
14969 // CHECK13:       simd.if.end185:
14970 // CHECK13-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
14971 // CHECK13-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_186]], align 4
14972 // CHECK13-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
14973 // CHECK13-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_188]], align 4
14974 // CHECK13-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
14975 // CHECK13-NEXT:    [[SUB190:%.*]] = sub nsw i32 [[TMP111]], 0
14976 // CHECK13-NEXT:    [[DIV191:%.*]] = sdiv i32 [[SUB190]], 1
14977 // CHECK13-NEXT:    [[SUB192:%.*]] = sub nsw i32 [[DIV191]], 1
14978 // CHECK13-NEXT:    store i32 [[SUB192]], i32* [[DOTCAPTURE_EXPR_189]], align 4
14979 // CHECK13-NEXT:    store i32 0, i32* [[DOTOMP_LB193]], align 4
14980 // CHECK13-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_189]], align 4
14981 // CHECK13-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB194]], align 4
14982 // CHECK13-NEXT:    store i32 0, i32* [[I195]], align 4
14983 // CHECK13-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
14984 // CHECK13-NEXT:    [[CMP196:%.*]] = icmp slt i32 0, [[TMP113]]
14985 // CHECK13-NEXT:    br i1 [[CMP196]], label [[SIMD_IF_THEN197:%.*]], label [[SIMD_IF_END220:%.*]]
14986 // CHECK13:       simd.if.then197:
14987 // CHECK13-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB193]], align 4
14988 // CHECK13-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV198]], align 4
14989 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND200:%.*]]
14990 // CHECK13:       omp.inner.for.cond200:
14991 // CHECK13-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
14992 // CHECK13-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB194]], align 4, !llvm.access.group !42
14993 // CHECK13-NEXT:    [[CMP201:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
14994 // CHECK13-NEXT:    br i1 [[CMP201]], label [[OMP_INNER_FOR_BODY202:%.*]], label [[OMP_INNER_FOR_END215:%.*]]
14995 // CHECK13:       omp.inner.for.body202:
14996 // CHECK13-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
14997 // CHECK13-NEXT:    [[MUL203:%.*]] = mul nsw i32 [[TMP117]], 1
14998 // CHECK13-NEXT:    [[ADD204:%.*]] = add nsw i32 0, [[MUL203]]
14999 // CHECK13-NEXT:    store i32 [[ADD204]], i32* [[I199]], align 4, !llvm.access.group !42
15000 // CHECK13-NEXT:    [[TMP118:%.*]] = load i32*, i32** [[B]], align 8, !llvm.access.group !42
15001 // CHECK13-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
15002 // CHECK13-NEXT:    [[IDXPROM205:%.*]] = sext i32 [[TMP119]] to i64
15003 // CHECK13-NEXT:    [[ARRAYIDX206:%.*]] = getelementptr inbounds i32, i32* [[TMP118]], i64 [[IDXPROM205]]
15004 // CHECK13-NEXT:    [[TMP120:%.*]] = load i32, i32* [[ARRAYIDX206]], align 4, !llvm.access.group !42
15005 // CHECK13-NEXT:    [[TMP121:%.*]] = load i32*, i32** [[C]], align 8, !llvm.access.group !42
15006 // CHECK13-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
15007 // CHECK13-NEXT:    [[IDXPROM207:%.*]] = sext i32 [[TMP122]] to i64
15008 // CHECK13-NEXT:    [[ARRAYIDX208:%.*]] = getelementptr inbounds i32, i32* [[TMP121]], i64 [[IDXPROM207]]
15009 // CHECK13-NEXT:    [[TMP123:%.*]] = load i32, i32* [[ARRAYIDX208]], align 4, !llvm.access.group !42
15010 // CHECK13-NEXT:    [[ADD209:%.*]] = add nsw i32 [[TMP120]], [[TMP123]]
15011 // CHECK13-NEXT:    [[TMP124:%.*]] = load i32*, i32** [[A]], align 8, !llvm.access.group !42
15012 // CHECK13-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I199]], align 4, !llvm.access.group !42
15013 // CHECK13-NEXT:    [[IDXPROM210:%.*]] = sext i32 [[TMP125]] to i64
15014 // CHECK13-NEXT:    [[ARRAYIDX211:%.*]] = getelementptr inbounds i32, i32* [[TMP124]], i64 [[IDXPROM210]]
15015 // CHECK13-NEXT:    store i32 [[ADD209]], i32* [[ARRAYIDX211]], align 4, !llvm.access.group !42
15016 // CHECK13-NEXT:    br label [[OMP_BODY_CONTINUE212:%.*]]
15017 // CHECK13:       omp.body.continue212:
15018 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_INC213:%.*]]
15019 // CHECK13:       omp.inner.for.inc213:
15020 // CHECK13-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
15021 // CHECK13-NEXT:    [[ADD214:%.*]] = add nsw i32 [[TMP126]], 1
15022 // CHECK13-NEXT:    store i32 [[ADD214]], i32* [[DOTOMP_IV198]], align 4, !llvm.access.group !42
15023 // CHECK13-NEXT:    br label [[OMP_INNER_FOR_COND200]], !llvm.loop [[LOOP43:![0-9]+]]
15024 // CHECK13:       omp.inner.for.end215:
15025 // CHECK13-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_188]], align 4
15026 // CHECK13-NEXT:    [[SUB216:%.*]] = sub nsw i32 [[TMP127]], 0
15027 // CHECK13-NEXT:    [[DIV217:%.*]] = sdiv i32 [[SUB216]], 1
15028 // CHECK13-NEXT:    [[MUL218:%.*]] = mul nsw i32 [[DIV217]], 1
15029 // CHECK13-NEXT:    [[ADD219:%.*]] = add nsw i32 0, [[MUL218]]
15030 // CHECK13-NEXT:    store i32 [[ADD219]], i32* [[I199]], align 4
15031 // CHECK13-NEXT:    br label [[SIMD_IF_END220]]
15032 // CHECK13:       simd.if.end220:
15033 // CHECK13-NEXT:    ret i32 0
15034 //
15035 //
15036 // CHECK15-LABEL: define {{[^@]+}}@main
15037 // CHECK15-SAME: () #[[ATTR0:[0-9]+]] {
15038 // CHECK15-NEXT:  entry:
15039 // CHECK15-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
15040 // CHECK15-NEXT:    [[A:%.*]] = alloca double*, align 4
15041 // CHECK15-NEXT:    [[B:%.*]] = alloca double*, align 4
15042 // CHECK15-NEXT:    [[C:%.*]] = alloca double*, align 4
15043 // CHECK15-NEXT:    [[N:%.*]] = alloca i32, align 4
15044 // CHECK15-NEXT:    [[CH:%.*]] = alloca i32, align 4
15045 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15046 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15047 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15048 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15049 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15050 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
15051 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15052 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
15053 // CHECK15-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
15054 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_14:%.*]] = alloca i32, align 4
15055 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_15:%.*]] = alloca i32, align 4
15056 // CHECK15-NEXT:    [[DOTOMP_LB19:%.*]] = alloca i32, align 4
15057 // CHECK15-NEXT:    [[DOTOMP_UB20:%.*]] = alloca i32, align 4
15058 // CHECK15-NEXT:    [[I21:%.*]] = alloca i32, align 4
15059 // CHECK15-NEXT:    [[DOTOMP_IV24:%.*]] = alloca i32, align 4
15060 // CHECK15-NEXT:    [[I25:%.*]] = alloca i32, align 4
15061 // CHECK15-NEXT:    [[_TMP44:%.*]] = alloca i32, align 4
15062 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_45:%.*]] = alloca i32, align 4
15063 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
15064 // CHECK15-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
15065 // CHECK15-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
15066 // CHECK15-NEXT:    [[I52:%.*]] = alloca i32, align 4
15067 // CHECK15-NEXT:    [[DOTOMP_IV55:%.*]] = alloca i32, align 4
15068 // CHECK15-NEXT:    [[I56:%.*]] = alloca i32, align 4
15069 // CHECK15-NEXT:    [[_TMP75:%.*]] = alloca i32, align 4
15070 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
15071 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_77:%.*]] = alloca i32, align 4
15072 // CHECK15-NEXT:    [[DOTOMP_LB81:%.*]] = alloca i32, align 4
15073 // CHECK15-NEXT:    [[DOTOMP_UB82:%.*]] = alloca i32, align 4
15074 // CHECK15-NEXT:    [[I83:%.*]] = alloca i32, align 4
15075 // CHECK15-NEXT:    [[DOTOMP_IV86:%.*]] = alloca i32, align 4
15076 // CHECK15-NEXT:    [[I87:%.*]] = alloca i32, align 4
15077 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_106:%.*]] = alloca i32, align 4
15078 // CHECK15-NEXT:    [[_TMP107:%.*]] = alloca i32, align 4
15079 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_108:%.*]] = alloca i32, align 4
15080 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_109:%.*]] = alloca i32, align 4
15081 // CHECK15-NEXT:    [[DOTOMP_LB113:%.*]] = alloca i32, align 4
15082 // CHECK15-NEXT:    [[DOTOMP_UB114:%.*]] = alloca i32, align 4
15083 // CHECK15-NEXT:    [[I115:%.*]] = alloca i32, align 4
15084 // CHECK15-NEXT:    [[DOTOMP_IV118:%.*]] = alloca i32, align 4
15085 // CHECK15-NEXT:    [[I119:%.*]] = alloca i32, align 4
15086 // CHECK15-NEXT:    [[_TMP138:%.*]] = alloca i32, align 4
15087 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_139:%.*]] = alloca i32, align 4
15088 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_140:%.*]] = alloca i32, align 4
15089 // CHECK15-NEXT:    [[DOTOMP_LB144:%.*]] = alloca i32, align 4
15090 // CHECK15-NEXT:    [[DOTOMP_UB145:%.*]] = alloca i32, align 4
15091 // CHECK15-NEXT:    [[I146:%.*]] = alloca i32, align 4
15092 // CHECK15-NEXT:    [[DOTOMP_IV149:%.*]] = alloca i32, align 4
15093 // CHECK15-NEXT:    [[I150:%.*]] = alloca i32, align 4
15094 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_169:%.*]] = alloca i32, align 4
15095 // CHECK15-NEXT:    [[_TMP170:%.*]] = alloca i32, align 4
15096 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_171:%.*]] = alloca i32, align 4
15097 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_172:%.*]] = alloca i32, align 4
15098 // CHECK15-NEXT:    [[DOTOMP_LB176:%.*]] = alloca i32, align 4
15099 // CHECK15-NEXT:    [[DOTOMP_UB177:%.*]] = alloca i32, align 4
15100 // CHECK15-NEXT:    [[I178:%.*]] = alloca i32, align 4
15101 // CHECK15-NEXT:    [[DOTOMP_IV181:%.*]] = alloca i32, align 4
15102 // CHECK15-NEXT:    [[I182:%.*]] = alloca i32, align 4
15103 // CHECK15-NEXT:    store i32 0, i32* [[RETVAL]], align 4
15104 // CHECK15-NEXT:    store i32 10000, i32* [[N]], align 4
15105 // CHECK15-NEXT:    store i32 100, i32* [[CH]], align 4
15106 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
15107 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
15108 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15109 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
15110 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15111 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15112 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15113 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15114 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15115 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
15116 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
15117 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15118 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
15119 // CHECK15-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
15120 // CHECK15:       simd.if.then:
15121 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15122 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
15123 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15124 // CHECK15:       omp.inner.for.cond:
15125 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15126 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
15127 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
15128 // CHECK15-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15129 // CHECK15:       omp.inner.for.body:
15130 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15131 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
15132 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15133 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !3
15134 // CHECK15-NEXT:    [[TMP8:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !3
15135 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
15136 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP8]], i32 [[TMP9]]
15137 // CHECK15-NEXT:    [[TMP10:%.*]] = load double, double* [[ARRAYIDX]], align 4, !llvm.access.group !3
15138 // CHECK15-NEXT:    [[TMP11:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !3
15139 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
15140 // CHECK15-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[TMP11]], i32 [[TMP12]]
15141 // CHECK15-NEXT:    [[TMP13:%.*]] = load double, double* [[ARRAYIDX5]], align 4, !llvm.access.group !3
15142 // CHECK15-NEXT:    [[ADD6:%.*]] = fadd double [[TMP10]], [[TMP13]]
15143 // CHECK15-NEXT:    [[TMP14:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !3
15144 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3
15145 // CHECK15-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds double, double* [[TMP14]], i32 [[TMP15]]
15146 // CHECK15-NEXT:    store double [[ADD6]], double* [[ARRAYIDX7]], align 4, !llvm.access.group !3
15147 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15148 // CHECK15:       omp.body.continue:
15149 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15150 // CHECK15:       omp.inner.for.inc:
15151 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15152 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
15153 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
15154 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
15155 // CHECK15:       omp.inner.for.end:
15156 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15157 // CHECK15-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP17]], 0
15158 // CHECK15-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
15159 // CHECK15-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
15160 // CHECK15-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
15161 // CHECK15-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
15162 // CHECK15-NEXT:    br label [[SIMD_IF_END]]
15163 // CHECK15:       simd.if.end:
15164 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
15165 // CHECK15-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_14]], align 4
15166 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
15167 // CHECK15-NEXT:    [[SUB16:%.*]] = sub nsw i32 [[TMP19]], 0
15168 // CHECK15-NEXT:    [[DIV17:%.*]] = sdiv i32 [[SUB16]], 1
15169 // CHECK15-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[DIV17]], 1
15170 // CHECK15-NEXT:    store i32 [[SUB18]], i32* [[DOTCAPTURE_EXPR_15]], align 4
15171 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB19]], align 4
15172 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_15]], align 4
15173 // CHECK15-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB20]], align 4
15174 // CHECK15-NEXT:    store i32 0, i32* [[I21]], align 4
15175 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
15176 // CHECK15-NEXT:    [[CMP22:%.*]] = icmp slt i32 0, [[TMP21]]
15177 // CHECK15-NEXT:    br i1 [[CMP22]], label [[SIMD_IF_THEN23:%.*]], label [[SIMD_IF_END43:%.*]]
15178 // CHECK15:       simd.if.then23:
15179 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4
15180 // CHECK15-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV24]], align 4
15181 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND26:%.*]]
15182 // CHECK15:       omp.inner.for.cond26:
15183 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
15184 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !7
15185 // CHECK15-NEXT:    [[CMP27:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
15186 // CHECK15-NEXT:    br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END38:%.*]]
15187 // CHECK15:       omp.inner.for.body28:
15188 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
15189 // CHECK15-NEXT:    [[MUL29:%.*]] = mul nsw i32 [[TMP25]], 1
15190 // CHECK15-NEXT:    [[ADD30:%.*]] = add nsw i32 0, [[MUL29]]
15191 // CHECK15-NEXT:    store i32 [[ADD30]], i32* [[I25]], align 4, !llvm.access.group !7
15192 // CHECK15-NEXT:    [[TMP26:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !7
15193 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
15194 // CHECK15-NEXT:    [[ARRAYIDX31:%.*]] = getelementptr inbounds double, double* [[TMP26]], i32 [[TMP27]]
15195 // CHECK15-NEXT:    [[TMP28:%.*]] = load double, double* [[ARRAYIDX31]], align 4, !llvm.access.group !7
15196 // CHECK15-NEXT:    [[TMP29:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !7
15197 // CHECK15-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
15198 // CHECK15-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds double, double* [[TMP29]], i32 [[TMP30]]
15199 // CHECK15-NEXT:    [[TMP31:%.*]] = load double, double* [[ARRAYIDX32]], align 4, !llvm.access.group !7
15200 // CHECK15-NEXT:    [[ADD33:%.*]] = fadd double [[TMP28]], [[TMP31]]
15201 // CHECK15-NEXT:    [[TMP32:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !7
15202 // CHECK15-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !7
15203 // CHECK15-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds double, double* [[TMP32]], i32 [[TMP33]]
15204 // CHECK15-NEXT:    store double [[ADD33]], double* [[ARRAYIDX34]], align 4, !llvm.access.group !7
15205 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE35:%.*]]
15206 // CHECK15:       omp.body.continue35:
15207 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC36:%.*]]
15208 // CHECK15:       omp.inner.for.inc36:
15209 // CHECK15-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
15210 // CHECK15-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP34]], 1
15211 // CHECK15-NEXT:    store i32 [[ADD37]], i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !7
15212 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP8:![0-9]+]]
15213 // CHECK15:       omp.inner.for.end38:
15214 // CHECK15-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
15215 // CHECK15-NEXT:    [[SUB39:%.*]] = sub nsw i32 [[TMP35]], 0
15216 // CHECK15-NEXT:    [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1
15217 // CHECK15-NEXT:    [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1
15218 // CHECK15-NEXT:    [[ADD42:%.*]] = add nsw i32 0, [[MUL41]]
15219 // CHECK15-NEXT:    store i32 [[ADD42]], i32* [[I25]], align 4
15220 // CHECK15-NEXT:    br label [[SIMD_IF_END43]]
15221 // CHECK15:       simd.if.end43:
15222 // CHECK15-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
15223 // CHECK15-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_45]], align 4
15224 // CHECK15-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
15225 // CHECK15-NEXT:    [[SUB47:%.*]] = sub nsw i32 [[TMP37]], 0
15226 // CHECK15-NEXT:    [[DIV48:%.*]] = sdiv i32 [[SUB47]], 1
15227 // CHECK15-NEXT:    [[SUB49:%.*]] = sub nsw i32 [[DIV48]], 1
15228 // CHECK15-NEXT:    store i32 [[SUB49]], i32* [[DOTCAPTURE_EXPR_46]], align 4
15229 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
15230 // CHECK15-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
15231 // CHECK15-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB51]], align 4
15232 // CHECK15-NEXT:    store i32 0, i32* [[I52]], align 4
15233 // CHECK15-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
15234 // CHECK15-NEXT:    [[CMP53:%.*]] = icmp slt i32 0, [[TMP39]]
15235 // CHECK15-NEXT:    br i1 [[CMP53]], label [[SIMD_IF_THEN54:%.*]], label [[SIMD_IF_END74:%.*]]
15236 // CHECK15:       simd.if.then54:
15237 // CHECK15-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
15238 // CHECK15-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV55]], align 4
15239 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND57:%.*]]
15240 // CHECK15:       omp.inner.for.cond57:
15241 // CHECK15-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
15242 // CHECK15-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !10
15243 // CHECK15-NEXT:    [[CMP58:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
15244 // CHECK15-NEXT:    br i1 [[CMP58]], label [[OMP_INNER_FOR_BODY59:%.*]], label [[OMP_INNER_FOR_END69:%.*]]
15245 // CHECK15:       omp.inner.for.body59:
15246 // CHECK15-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
15247 // CHECK15-NEXT:    [[MUL60:%.*]] = mul nsw i32 [[TMP43]], 1
15248 // CHECK15-NEXT:    [[ADD61:%.*]] = add nsw i32 0, [[MUL60]]
15249 // CHECK15-NEXT:    store i32 [[ADD61]], i32* [[I56]], align 4, !llvm.access.group !10
15250 // CHECK15-NEXT:    [[TMP44:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !10
15251 // CHECK15-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
15252 // CHECK15-NEXT:    [[ARRAYIDX62:%.*]] = getelementptr inbounds double, double* [[TMP44]], i32 [[TMP45]]
15253 // CHECK15-NEXT:    [[TMP46:%.*]] = load double, double* [[ARRAYIDX62]], align 4, !llvm.access.group !10
15254 // CHECK15-NEXT:    [[TMP47:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !10
15255 // CHECK15-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
15256 // CHECK15-NEXT:    [[ARRAYIDX63:%.*]] = getelementptr inbounds double, double* [[TMP47]], i32 [[TMP48]]
15257 // CHECK15-NEXT:    [[TMP49:%.*]] = load double, double* [[ARRAYIDX63]], align 4, !llvm.access.group !10
15258 // CHECK15-NEXT:    [[ADD64:%.*]] = fadd double [[TMP46]], [[TMP49]]
15259 // CHECK15-NEXT:    [[TMP50:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !10
15260 // CHECK15-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !10
15261 // CHECK15-NEXT:    [[ARRAYIDX65:%.*]] = getelementptr inbounds double, double* [[TMP50]], i32 [[TMP51]]
15262 // CHECK15-NEXT:    store double [[ADD64]], double* [[ARRAYIDX65]], align 4, !llvm.access.group !10
15263 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE66:%.*]]
15264 // CHECK15:       omp.body.continue66:
15265 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC67:%.*]]
15266 // CHECK15:       omp.inner.for.inc67:
15267 // CHECK15-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
15268 // CHECK15-NEXT:    [[ADD68:%.*]] = add nsw i32 [[TMP52]], 1
15269 // CHECK15-NEXT:    store i32 [[ADD68]], i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !10
15270 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND57]], !llvm.loop [[LOOP11:![0-9]+]]
15271 // CHECK15:       omp.inner.for.end69:
15272 // CHECK15-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
15273 // CHECK15-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP53]], 0
15274 // CHECK15-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
15275 // CHECK15-NEXT:    [[MUL72:%.*]] = mul nsw i32 [[DIV71]], 1
15276 // CHECK15-NEXT:    [[ADD73:%.*]] = add nsw i32 0, [[MUL72]]
15277 // CHECK15-NEXT:    store i32 [[ADD73]], i32* [[I56]], align 4
15278 // CHECK15-NEXT:    br label [[SIMD_IF_END74]]
15279 // CHECK15:       simd.if.end74:
15280 // CHECK15-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
15281 // CHECK15-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_76]], align 4
15282 // CHECK15-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
15283 // CHECK15-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP55]], 0
15284 // CHECK15-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
15285 // CHECK15-NEXT:    [[SUB80:%.*]] = sub nsw i32 [[DIV79]], 1
15286 // CHECK15-NEXT:    store i32 [[SUB80]], i32* [[DOTCAPTURE_EXPR_77]], align 4
15287 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB81]], align 4
15288 // CHECK15-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_77]], align 4
15289 // CHECK15-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB82]], align 4
15290 // CHECK15-NEXT:    store i32 0, i32* [[I83]], align 4
15291 // CHECK15-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
15292 // CHECK15-NEXT:    [[CMP84:%.*]] = icmp slt i32 0, [[TMP57]]
15293 // CHECK15-NEXT:    br i1 [[CMP84]], label [[SIMD_IF_THEN85:%.*]], label [[SIMD_IF_END105:%.*]]
15294 // CHECK15:       simd.if.then85:
15295 // CHECK15-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB81]], align 4
15296 // CHECK15-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV86]], align 4
15297 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND88:%.*]]
15298 // CHECK15:       omp.inner.for.cond88:
15299 // CHECK15-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
15300 // CHECK15-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB82]], align 4, !llvm.access.group !13
15301 // CHECK15-NEXT:    [[CMP89:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
15302 // CHECK15-NEXT:    br i1 [[CMP89]], label [[OMP_INNER_FOR_BODY90:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
15303 // CHECK15:       omp.inner.for.body90:
15304 // CHECK15-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
15305 // CHECK15-NEXT:    [[MUL91:%.*]] = mul nsw i32 [[TMP61]], 1
15306 // CHECK15-NEXT:    [[ADD92:%.*]] = add nsw i32 0, [[MUL91]]
15307 // CHECK15-NEXT:    store i32 [[ADD92]], i32* [[I87]], align 4, !llvm.access.group !13
15308 // CHECK15-NEXT:    [[TMP62:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !13
15309 // CHECK15-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
15310 // CHECK15-NEXT:    [[ARRAYIDX93:%.*]] = getelementptr inbounds double, double* [[TMP62]], i32 [[TMP63]]
15311 // CHECK15-NEXT:    [[TMP64:%.*]] = load double, double* [[ARRAYIDX93]], align 4, !llvm.access.group !13
15312 // CHECK15-NEXT:    [[TMP65:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !13
15313 // CHECK15-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
15314 // CHECK15-NEXT:    [[ARRAYIDX94:%.*]] = getelementptr inbounds double, double* [[TMP65]], i32 [[TMP66]]
15315 // CHECK15-NEXT:    [[TMP67:%.*]] = load double, double* [[ARRAYIDX94]], align 4, !llvm.access.group !13
15316 // CHECK15-NEXT:    [[ADD95:%.*]] = fadd double [[TMP64]], [[TMP67]]
15317 // CHECK15-NEXT:    [[TMP68:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !13
15318 // CHECK15-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !13
15319 // CHECK15-NEXT:    [[ARRAYIDX96:%.*]] = getelementptr inbounds double, double* [[TMP68]], i32 [[TMP69]]
15320 // CHECK15-NEXT:    store double [[ADD95]], double* [[ARRAYIDX96]], align 4, !llvm.access.group !13
15321 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
15322 // CHECK15:       omp.body.continue97:
15323 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
15324 // CHECK15:       omp.inner.for.inc98:
15325 // CHECK15-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
15326 // CHECK15-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP70]], 1
15327 // CHECK15-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !13
15328 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND88]], !llvm.loop [[LOOP14:![0-9]+]]
15329 // CHECK15:       omp.inner.for.end100:
15330 // CHECK15-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
15331 // CHECK15-NEXT:    [[SUB101:%.*]] = sub nsw i32 [[TMP71]], 0
15332 // CHECK15-NEXT:    [[DIV102:%.*]] = sdiv i32 [[SUB101]], 1
15333 // CHECK15-NEXT:    [[MUL103:%.*]] = mul nsw i32 [[DIV102]], 1
15334 // CHECK15-NEXT:    [[ADD104:%.*]] = add nsw i32 0, [[MUL103]]
15335 // CHECK15-NEXT:    store i32 [[ADD104]], i32* [[I87]], align 4
15336 // CHECK15-NEXT:    br label [[SIMD_IF_END105]]
15337 // CHECK15:       simd.if.end105:
15338 // CHECK15-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
15339 // CHECK15-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_106]], align 4
15340 // CHECK15-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
15341 // CHECK15-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_108]], align 4
15342 // CHECK15-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
15343 // CHECK15-NEXT:    [[SUB110:%.*]] = sub nsw i32 [[TMP74]], 0
15344 // CHECK15-NEXT:    [[DIV111:%.*]] = sdiv i32 [[SUB110]], 1
15345 // CHECK15-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[DIV111]], 1
15346 // CHECK15-NEXT:    store i32 [[SUB112]], i32* [[DOTCAPTURE_EXPR_109]], align 4
15347 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB113]], align 4
15348 // CHECK15-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_109]], align 4
15349 // CHECK15-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB114]], align 4
15350 // CHECK15-NEXT:    store i32 0, i32* [[I115]], align 4
15351 // CHECK15-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
15352 // CHECK15-NEXT:    [[CMP116:%.*]] = icmp slt i32 0, [[TMP76]]
15353 // CHECK15-NEXT:    br i1 [[CMP116]], label [[SIMD_IF_THEN117:%.*]], label [[SIMD_IF_END137:%.*]]
15354 // CHECK15:       simd.if.then117:
15355 // CHECK15-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB113]], align 4
15356 // CHECK15-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV118]], align 4
15357 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND120:%.*]]
15358 // CHECK15:       omp.inner.for.cond120:
15359 // CHECK15-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
15360 // CHECK15-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB114]], align 4, !llvm.access.group !16
15361 // CHECK15-NEXT:    [[CMP121:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
15362 // CHECK15-NEXT:    br i1 [[CMP121]], label [[OMP_INNER_FOR_BODY122:%.*]], label [[OMP_INNER_FOR_END132:%.*]]
15363 // CHECK15:       omp.inner.for.body122:
15364 // CHECK15-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
15365 // CHECK15-NEXT:    [[MUL123:%.*]] = mul nsw i32 [[TMP80]], 1
15366 // CHECK15-NEXT:    [[ADD124:%.*]] = add nsw i32 0, [[MUL123]]
15367 // CHECK15-NEXT:    store i32 [[ADD124]], i32* [[I119]], align 4, !llvm.access.group !16
15368 // CHECK15-NEXT:    [[TMP81:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !16
15369 // CHECK15-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
15370 // CHECK15-NEXT:    [[ARRAYIDX125:%.*]] = getelementptr inbounds double, double* [[TMP81]], i32 [[TMP82]]
15371 // CHECK15-NEXT:    [[TMP83:%.*]] = load double, double* [[ARRAYIDX125]], align 4, !llvm.access.group !16
15372 // CHECK15-NEXT:    [[TMP84:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !16
15373 // CHECK15-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
15374 // CHECK15-NEXT:    [[ARRAYIDX126:%.*]] = getelementptr inbounds double, double* [[TMP84]], i32 [[TMP85]]
15375 // CHECK15-NEXT:    [[TMP86:%.*]] = load double, double* [[ARRAYIDX126]], align 4, !llvm.access.group !16
15376 // CHECK15-NEXT:    [[ADD127:%.*]] = fadd double [[TMP83]], [[TMP86]]
15377 // CHECK15-NEXT:    [[TMP87:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !16
15378 // CHECK15-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !16
15379 // CHECK15-NEXT:    [[ARRAYIDX128:%.*]] = getelementptr inbounds double, double* [[TMP87]], i32 [[TMP88]]
15380 // CHECK15-NEXT:    store double [[ADD127]], double* [[ARRAYIDX128]], align 4, !llvm.access.group !16
15381 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE129:%.*]]
15382 // CHECK15:       omp.body.continue129:
15383 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC130:%.*]]
15384 // CHECK15:       omp.inner.for.inc130:
15385 // CHECK15-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
15386 // CHECK15-NEXT:    [[ADD131:%.*]] = add nsw i32 [[TMP89]], 1
15387 // CHECK15-NEXT:    store i32 [[ADD131]], i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !16
15388 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND120]], !llvm.loop [[LOOP17:![0-9]+]]
15389 // CHECK15:       omp.inner.for.end132:
15390 // CHECK15-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
15391 // CHECK15-NEXT:    [[SUB133:%.*]] = sub nsw i32 [[TMP90]], 0
15392 // CHECK15-NEXT:    [[DIV134:%.*]] = sdiv i32 [[SUB133]], 1
15393 // CHECK15-NEXT:    [[MUL135:%.*]] = mul nsw i32 [[DIV134]], 1
15394 // CHECK15-NEXT:    [[ADD136:%.*]] = add nsw i32 0, [[MUL135]]
15395 // CHECK15-NEXT:    store i32 [[ADD136]], i32* [[I119]], align 4
15396 // CHECK15-NEXT:    br label [[SIMD_IF_END137]]
15397 // CHECK15:       simd.if.end137:
15398 // CHECK15-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
15399 // CHECK15-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_139]], align 4
15400 // CHECK15-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
15401 // CHECK15-NEXT:    [[SUB141:%.*]] = sub nsw i32 [[TMP92]], 0
15402 // CHECK15-NEXT:    [[DIV142:%.*]] = sdiv i32 [[SUB141]], 1
15403 // CHECK15-NEXT:    [[SUB143:%.*]] = sub nsw i32 [[DIV142]], 1
15404 // CHECK15-NEXT:    store i32 [[SUB143]], i32* [[DOTCAPTURE_EXPR_140]], align 4
15405 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB144]], align 4
15406 // CHECK15-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_140]], align 4
15407 // CHECK15-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB145]], align 4
15408 // CHECK15-NEXT:    store i32 0, i32* [[I146]], align 4
15409 // CHECK15-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
15410 // CHECK15-NEXT:    [[CMP147:%.*]] = icmp slt i32 0, [[TMP94]]
15411 // CHECK15-NEXT:    br i1 [[CMP147]], label [[SIMD_IF_THEN148:%.*]], label [[SIMD_IF_END168:%.*]]
15412 // CHECK15:       simd.if.then148:
15413 // CHECK15-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB144]], align 4
15414 // CHECK15-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV149]], align 4
15415 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND151:%.*]]
15416 // CHECK15:       omp.inner.for.cond151:
15417 // CHECK15-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
15418 // CHECK15-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB145]], align 4, !llvm.access.group !19
15419 // CHECK15-NEXT:    [[CMP152:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
15420 // CHECK15-NEXT:    br i1 [[CMP152]], label [[OMP_INNER_FOR_BODY153:%.*]], label [[OMP_INNER_FOR_END163:%.*]]
15421 // CHECK15:       omp.inner.for.body153:
15422 // CHECK15-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
15423 // CHECK15-NEXT:    [[MUL154:%.*]] = mul nsw i32 [[TMP98]], 1
15424 // CHECK15-NEXT:    [[ADD155:%.*]] = add nsw i32 0, [[MUL154]]
15425 // CHECK15-NEXT:    store i32 [[ADD155]], i32* [[I150]], align 4, !llvm.access.group !19
15426 // CHECK15-NEXT:    [[TMP99:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !19
15427 // CHECK15-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
15428 // CHECK15-NEXT:    [[ARRAYIDX156:%.*]] = getelementptr inbounds double, double* [[TMP99]], i32 [[TMP100]]
15429 // CHECK15-NEXT:    [[TMP101:%.*]] = load double, double* [[ARRAYIDX156]], align 4, !llvm.access.group !19
15430 // CHECK15-NEXT:    [[TMP102:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !19
15431 // CHECK15-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
15432 // CHECK15-NEXT:    [[ARRAYIDX157:%.*]] = getelementptr inbounds double, double* [[TMP102]], i32 [[TMP103]]
15433 // CHECK15-NEXT:    [[TMP104:%.*]] = load double, double* [[ARRAYIDX157]], align 4, !llvm.access.group !19
15434 // CHECK15-NEXT:    [[ADD158:%.*]] = fadd double [[TMP101]], [[TMP104]]
15435 // CHECK15-NEXT:    [[TMP105:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !19
15436 // CHECK15-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !19
15437 // CHECK15-NEXT:    [[ARRAYIDX159:%.*]] = getelementptr inbounds double, double* [[TMP105]], i32 [[TMP106]]
15438 // CHECK15-NEXT:    store double [[ADD158]], double* [[ARRAYIDX159]], align 4, !llvm.access.group !19
15439 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE160:%.*]]
15440 // CHECK15:       omp.body.continue160:
15441 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC161:%.*]]
15442 // CHECK15:       omp.inner.for.inc161:
15443 // CHECK15-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
15444 // CHECK15-NEXT:    [[ADD162:%.*]] = add nsw i32 [[TMP107]], 1
15445 // CHECK15-NEXT:    store i32 [[ADD162]], i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !19
15446 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND151]], !llvm.loop [[LOOP20:![0-9]+]]
15447 // CHECK15:       omp.inner.for.end163:
15448 // CHECK15-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
15449 // CHECK15-NEXT:    [[SUB164:%.*]] = sub nsw i32 [[TMP108]], 0
15450 // CHECK15-NEXT:    [[DIV165:%.*]] = sdiv i32 [[SUB164]], 1
15451 // CHECK15-NEXT:    [[MUL166:%.*]] = mul nsw i32 [[DIV165]], 1
15452 // CHECK15-NEXT:    [[ADD167:%.*]] = add nsw i32 0, [[MUL166]]
15453 // CHECK15-NEXT:    store i32 [[ADD167]], i32* [[I150]], align 4
15454 // CHECK15-NEXT:    br label [[SIMD_IF_END168]]
15455 // CHECK15:       simd.if.end168:
15456 // CHECK15-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
15457 // CHECK15-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_169]], align 4
15458 // CHECK15-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
15459 // CHECK15-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_171]], align 4
15460 // CHECK15-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
15461 // CHECK15-NEXT:    [[SUB173:%.*]] = sub nsw i32 [[TMP111]], 0
15462 // CHECK15-NEXT:    [[DIV174:%.*]] = sdiv i32 [[SUB173]], 1
15463 // CHECK15-NEXT:    [[SUB175:%.*]] = sub nsw i32 [[DIV174]], 1
15464 // CHECK15-NEXT:    store i32 [[SUB175]], i32* [[DOTCAPTURE_EXPR_172]], align 4
15465 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB176]], align 4
15466 // CHECK15-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_172]], align 4
15467 // CHECK15-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB177]], align 4
15468 // CHECK15-NEXT:    store i32 0, i32* [[I178]], align 4
15469 // CHECK15-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
15470 // CHECK15-NEXT:    [[CMP179:%.*]] = icmp slt i32 0, [[TMP113]]
15471 // CHECK15-NEXT:    br i1 [[CMP179]], label [[SIMD_IF_THEN180:%.*]], label [[SIMD_IF_END200:%.*]]
15472 // CHECK15:       simd.if.then180:
15473 // CHECK15-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB176]], align 4
15474 // CHECK15-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV181]], align 4
15475 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND183:%.*]]
15476 // CHECK15:       omp.inner.for.cond183:
15477 // CHECK15-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
15478 // CHECK15-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB177]], align 4, !llvm.access.group !22
15479 // CHECK15-NEXT:    [[CMP184:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
15480 // CHECK15-NEXT:    br i1 [[CMP184]], label [[OMP_INNER_FOR_BODY185:%.*]], label [[OMP_INNER_FOR_END195:%.*]]
15481 // CHECK15:       omp.inner.for.body185:
15482 // CHECK15-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
15483 // CHECK15-NEXT:    [[MUL186:%.*]] = mul nsw i32 [[TMP117]], 1
15484 // CHECK15-NEXT:    [[ADD187:%.*]] = add nsw i32 0, [[MUL186]]
15485 // CHECK15-NEXT:    store i32 [[ADD187]], i32* [[I182]], align 4, !llvm.access.group !22
15486 // CHECK15-NEXT:    [[TMP118:%.*]] = load double*, double** [[B]], align 4, !llvm.access.group !22
15487 // CHECK15-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
15488 // CHECK15-NEXT:    [[ARRAYIDX188:%.*]] = getelementptr inbounds double, double* [[TMP118]], i32 [[TMP119]]
15489 // CHECK15-NEXT:    [[TMP120:%.*]] = load double, double* [[ARRAYIDX188]], align 4, !llvm.access.group !22
15490 // CHECK15-NEXT:    [[TMP121:%.*]] = load double*, double** [[C]], align 4, !llvm.access.group !22
15491 // CHECK15-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
15492 // CHECK15-NEXT:    [[ARRAYIDX189:%.*]] = getelementptr inbounds double, double* [[TMP121]], i32 [[TMP122]]
15493 // CHECK15-NEXT:    [[TMP123:%.*]] = load double, double* [[ARRAYIDX189]], align 4, !llvm.access.group !22
15494 // CHECK15-NEXT:    [[ADD190:%.*]] = fadd double [[TMP120]], [[TMP123]]
15495 // CHECK15-NEXT:    [[TMP124:%.*]] = load double*, double** [[A]], align 4, !llvm.access.group !22
15496 // CHECK15-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !22
15497 // CHECK15-NEXT:    [[ARRAYIDX191:%.*]] = getelementptr inbounds double, double* [[TMP124]], i32 [[TMP125]]
15498 // CHECK15-NEXT:    store double [[ADD190]], double* [[ARRAYIDX191]], align 4, !llvm.access.group !22
15499 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE192:%.*]]
15500 // CHECK15:       omp.body.continue192:
15501 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC193:%.*]]
15502 // CHECK15:       omp.inner.for.inc193:
15503 // CHECK15-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
15504 // CHECK15-NEXT:    [[ADD194:%.*]] = add nsw i32 [[TMP126]], 1
15505 // CHECK15-NEXT:    store i32 [[ADD194]], i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !22
15506 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND183]], !llvm.loop [[LOOP23:![0-9]+]]
15507 // CHECK15:       omp.inner.for.end195:
15508 // CHECK15-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
15509 // CHECK15-NEXT:    [[SUB196:%.*]] = sub nsw i32 [[TMP127]], 0
15510 // CHECK15-NEXT:    [[DIV197:%.*]] = sdiv i32 [[SUB196]], 1
15511 // CHECK15-NEXT:    [[MUL198:%.*]] = mul nsw i32 [[DIV197]], 1
15512 // CHECK15-NEXT:    [[ADD199:%.*]] = add nsw i32 0, [[MUL198]]
15513 // CHECK15-NEXT:    store i32 [[ADD199]], i32* [[I182]], align 4
15514 // CHECK15-NEXT:    br label [[SIMD_IF_END200]]
15515 // CHECK15:       simd.if.end200:
15516 // CHECK15-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
15517 // CHECK15-NEXT:    ret i32 [[CALL]]
15518 //
15519 //
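// NOTE (annotation, inferred from the assertions that follow): the CHECK15
// lines below cover the tmain<int>() instantiation on the 32-bit run (all
// pointer accesses are align 4). Each loop is expected to lower to a
// trip-count-guarded simd.if.then / omp.inner.for.* region that loads an
// element through [[B]] and [[C]], stores the sum through [[A]], and carries
// its own !llvm.access.group metadata; the function itself is checked to
// return 0.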
15520 // CHECK15-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
15521 // CHECK15-SAME: () #[[ATTR1:[0-9]+]] comdat {
15522 // CHECK15-NEXT:  entry:
15523 // CHECK15-NEXT:    [[A:%.*]] = alloca i32*, align 4
15524 // CHECK15-NEXT:    [[B:%.*]] = alloca i32*, align 4
15525 // CHECK15-NEXT:    [[C:%.*]] = alloca i32*, align 4
15526 // CHECK15-NEXT:    [[N:%.*]] = alloca i32, align 4
15527 // CHECK15-NEXT:    [[CH:%.*]] = alloca i32, align 4
15528 // CHECK15-NEXT:    [[TMP:%.*]] = alloca i32, align 4
15529 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15530 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
15531 // CHECK15-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
15532 // CHECK15-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
15533 // CHECK15-NEXT:    [[I:%.*]] = alloca i32, align 4
15534 // CHECK15-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
15535 // CHECK15-NEXT:    [[I3:%.*]] = alloca i32, align 4
15536 // CHECK15-NEXT:    [[_TMP13:%.*]] = alloca i32, align 4
15537 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_14:%.*]] = alloca i32, align 4
15538 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_15:%.*]] = alloca i32, align 4
15539 // CHECK15-NEXT:    [[DOTOMP_LB19:%.*]] = alloca i32, align 4
15540 // CHECK15-NEXT:    [[DOTOMP_UB20:%.*]] = alloca i32, align 4
15541 // CHECK15-NEXT:    [[I21:%.*]] = alloca i32, align 4
15542 // CHECK15-NEXT:    [[DOTOMP_IV24:%.*]] = alloca i32, align 4
15543 // CHECK15-NEXT:    [[I25:%.*]] = alloca i32, align 4
15544 // CHECK15-NEXT:    [[_TMP44:%.*]] = alloca i32, align 4
15545 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_45:%.*]] = alloca i32, align 4
15546 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
15547 // CHECK15-NEXT:    [[DOTOMP_LB50:%.*]] = alloca i32, align 4
15548 // CHECK15-NEXT:    [[DOTOMP_UB51:%.*]] = alloca i32, align 4
15549 // CHECK15-NEXT:    [[I52:%.*]] = alloca i32, align 4
15550 // CHECK15-NEXT:    [[DOTOMP_IV55:%.*]] = alloca i32, align 4
15551 // CHECK15-NEXT:    [[I56:%.*]] = alloca i32, align 4
15552 // CHECK15-NEXT:    [[_TMP75:%.*]] = alloca i32, align 4
15553 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_76:%.*]] = alloca i32, align 4
15554 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_77:%.*]] = alloca i32, align 4
15555 // CHECK15-NEXT:    [[DOTOMP_LB81:%.*]] = alloca i32, align 4
15556 // CHECK15-NEXT:    [[DOTOMP_UB82:%.*]] = alloca i32, align 4
15557 // CHECK15-NEXT:    [[I83:%.*]] = alloca i32, align 4
15558 // CHECK15-NEXT:    [[DOTOMP_IV86:%.*]] = alloca i32, align 4
15559 // CHECK15-NEXT:    [[I87:%.*]] = alloca i32, align 4
15560 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_106:%.*]] = alloca i32, align 4
15561 // CHECK15-NEXT:    [[_TMP107:%.*]] = alloca i32, align 4
15562 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_108:%.*]] = alloca i32, align 4
15563 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_109:%.*]] = alloca i32, align 4
15564 // CHECK15-NEXT:    [[DOTOMP_LB113:%.*]] = alloca i32, align 4
15565 // CHECK15-NEXT:    [[DOTOMP_UB114:%.*]] = alloca i32, align 4
15566 // CHECK15-NEXT:    [[I115:%.*]] = alloca i32, align 4
15567 // CHECK15-NEXT:    [[DOTOMP_IV118:%.*]] = alloca i32, align 4
15568 // CHECK15-NEXT:    [[I119:%.*]] = alloca i32, align 4
15569 // CHECK15-NEXT:    [[_TMP138:%.*]] = alloca i32, align 4
15570 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_139:%.*]] = alloca i32, align 4
15571 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_140:%.*]] = alloca i32, align 4
15572 // CHECK15-NEXT:    [[DOTOMP_LB144:%.*]] = alloca i32, align 4
15573 // CHECK15-NEXT:    [[DOTOMP_UB145:%.*]] = alloca i32, align 4
15574 // CHECK15-NEXT:    [[I146:%.*]] = alloca i32, align 4
15575 // CHECK15-NEXT:    [[DOTOMP_IV149:%.*]] = alloca i32, align 4
15576 // CHECK15-NEXT:    [[I150:%.*]] = alloca i32, align 4
15577 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_169:%.*]] = alloca i32, align 4
15578 // CHECK15-NEXT:    [[_TMP170:%.*]] = alloca i32, align 4
15579 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_171:%.*]] = alloca i32, align 4
15580 // CHECK15-NEXT:    [[DOTCAPTURE_EXPR_172:%.*]] = alloca i32, align 4
15581 // CHECK15-NEXT:    [[DOTOMP_LB176:%.*]] = alloca i32, align 4
15582 // CHECK15-NEXT:    [[DOTOMP_UB177:%.*]] = alloca i32, align 4
15583 // CHECK15-NEXT:    [[I178:%.*]] = alloca i32, align 4
15584 // CHECK15-NEXT:    [[DOTOMP_IV181:%.*]] = alloca i32, align 4
15585 // CHECK15-NEXT:    [[I182:%.*]] = alloca i32, align 4
15586 // CHECK15-NEXT:    store i32 10000, i32* [[N]], align 4
15587 // CHECK15-NEXT:    store i32 100, i32* [[CH]], align 4
15588 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N]], align 4
15589 // CHECK15-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
15590 // CHECK15-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15591 // CHECK15-NEXT:    [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
15592 // CHECK15-NEXT:    [[DIV:%.*]] = sdiv i32 [[SUB]], 1
15593 // CHECK15-NEXT:    [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
15594 // CHECK15-NEXT:    store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
15595 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
15596 // CHECK15-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
15597 // CHECK15-NEXT:    store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
15598 // CHECK15-NEXT:    store i32 0, i32* [[I]], align 4
15599 // CHECK15-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15600 // CHECK15-NEXT:    [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
15601 // CHECK15-NEXT:    br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
15602 // CHECK15:       simd.if.then:
15603 // CHECK15-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
15604 // CHECK15-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
15605 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
15606 // CHECK15:       omp.inner.for.cond:
15607 // CHECK15-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
15608 // CHECK15-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
15609 // CHECK15-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
15610 // CHECK15-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15611 // CHECK15:       omp.inner.for.body:
15612 // CHECK15-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
15613 // CHECK15-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
15614 // CHECK15-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15615 // CHECK15-NEXT:    store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !25
15616 // CHECK15-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !25
15617 // CHECK15-NEXT:    [[TMP9:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
15618 // CHECK15-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 [[TMP9]]
15619 // CHECK15-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
15620 // CHECK15-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !25
15621 // CHECK15-NEXT:    [[TMP12:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
15622 // CHECK15-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i32 [[TMP12]]
15623 // CHECK15-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX5]], align 4, !llvm.access.group !25
15624 // CHECK15-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP10]], [[TMP13]]
15625 // CHECK15-NEXT:    [[TMP14:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !25
15626 // CHECK15-NEXT:    [[TMP15:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !25
15627 // CHECK15-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP14]], i32 [[TMP15]]
15628 // CHECK15-NEXT:    store i32 [[ADD6]], i32* [[ARRAYIDX7]], align 4, !llvm.access.group !25
15629 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
15630 // CHECK15:       omp.body.continue:
15631 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
15632 // CHECK15:       omp.inner.for.inc:
15633 // CHECK15-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
15634 // CHECK15-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
15635 // CHECK15-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
15636 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
15637 // CHECK15:       omp.inner.for.end:
15638 // CHECK15-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
15639 // CHECK15-NEXT:    [[SUB9:%.*]] = sub nsw i32 [[TMP17]], 0
15640 // CHECK15-NEXT:    [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
15641 // CHECK15-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
15642 // CHECK15-NEXT:    [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
15643 // CHECK15-NEXT:    store i32 [[ADD12]], i32* [[I3]], align 4
15644 // CHECK15-NEXT:    br label [[SIMD_IF_END]]
15645 // CHECK15:       simd.if.end:
15646 // CHECK15-NEXT:    [[TMP18:%.*]] = load i32, i32* [[N]], align 4
15647 // CHECK15-NEXT:    store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_14]], align 4
15648 // CHECK15-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
15649 // CHECK15-NEXT:    [[SUB16:%.*]] = sub nsw i32 [[TMP19]], 0
15650 // CHECK15-NEXT:    [[DIV17:%.*]] = sdiv i32 [[SUB16]], 1
15651 // CHECK15-NEXT:    [[SUB18:%.*]] = sub nsw i32 [[DIV17]], 1
15652 // CHECK15-NEXT:    store i32 [[SUB18]], i32* [[DOTCAPTURE_EXPR_15]], align 4
15653 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB19]], align 4
15654 // CHECK15-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_15]], align 4
15655 // CHECK15-NEXT:    store i32 [[TMP20]], i32* [[DOTOMP_UB20]], align 4
15656 // CHECK15-NEXT:    store i32 0, i32* [[I21]], align 4
15657 // CHECK15-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
15658 // CHECK15-NEXT:    [[CMP22:%.*]] = icmp slt i32 0, [[TMP21]]
15659 // CHECK15-NEXT:    br i1 [[CMP22]], label [[SIMD_IF_THEN23:%.*]], label [[SIMD_IF_END43:%.*]]
15660 // CHECK15:       simd.if.then23:
15661 // CHECK15-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4
15662 // CHECK15-NEXT:    store i32 [[TMP22]], i32* [[DOTOMP_IV24]], align 4
15663 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND26:%.*]]
15664 // CHECK15:       omp.inner.for.cond26:
15665 // CHECK15-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
15666 // CHECK15-NEXT:    [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !28
15667 // CHECK15-NEXT:    [[CMP27:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
15668 // CHECK15-NEXT:    br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END38:%.*]]
15669 // CHECK15:       omp.inner.for.body28:
15670 // CHECK15-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
15671 // CHECK15-NEXT:    [[MUL29:%.*]] = mul nsw i32 [[TMP25]], 1
15672 // CHECK15-NEXT:    [[ADD30:%.*]] = add nsw i32 0, [[MUL29]]
15673 // CHECK15-NEXT:    store i32 [[ADD30]], i32* [[I25]], align 4, !llvm.access.group !28
15674 // CHECK15-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !28
15675 // CHECK15-NEXT:    [[TMP27:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
15676 // CHECK15-NEXT:    [[ARRAYIDX31:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
15677 // CHECK15-NEXT:    [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX31]], align 4, !llvm.access.group !28
15678 // CHECK15-NEXT:    [[TMP29:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !28
15679 // CHECK15-NEXT:    [[TMP30:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
15680 // CHECK15-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 [[TMP30]]
15681 // CHECK15-NEXT:    [[TMP31:%.*]] = load i32, i32* [[ARRAYIDX32]], align 4, !llvm.access.group !28
15682 // CHECK15-NEXT:    [[ADD33:%.*]] = add nsw i32 [[TMP28]], [[TMP31]]
15683 // CHECK15-NEXT:    [[TMP32:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !28
15684 // CHECK15-NEXT:    [[TMP33:%.*]] = load i32, i32* [[I25]], align 4, !llvm.access.group !28
15685 // CHECK15-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i32 [[TMP33]]
15686 // CHECK15-NEXT:    store i32 [[ADD33]], i32* [[ARRAYIDX34]], align 4, !llvm.access.group !28
15687 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE35:%.*]]
15688 // CHECK15:       omp.body.continue35:
15689 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC36:%.*]]
15690 // CHECK15:       omp.inner.for.inc36:
15691 // CHECK15-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
15692 // CHECK15-NEXT:    [[ADD37:%.*]] = add nsw i32 [[TMP34]], 1
15693 // CHECK15-NEXT:    store i32 [[ADD37]], i32* [[DOTOMP_IV24]], align 4, !llvm.access.group !28
15694 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP29:![0-9]+]]
15695 // CHECK15:       omp.inner.for.end38:
15696 // CHECK15-NEXT:    [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_14]], align 4
15697 // CHECK15-NEXT:    [[SUB39:%.*]] = sub nsw i32 [[TMP35]], 0
15698 // CHECK15-NEXT:    [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1
15699 // CHECK15-NEXT:    [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1
15700 // CHECK15-NEXT:    [[ADD42:%.*]] = add nsw i32 0, [[MUL41]]
15701 // CHECK15-NEXT:    store i32 [[ADD42]], i32* [[I25]], align 4
15702 // CHECK15-NEXT:    br label [[SIMD_IF_END43]]
15703 // CHECK15:       simd.if.end43:
15704 // CHECK15-NEXT:    [[TMP36:%.*]] = load i32, i32* [[N]], align 4
15705 // CHECK15-NEXT:    store i32 [[TMP36]], i32* [[DOTCAPTURE_EXPR_45]], align 4
15706 // CHECK15-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
15707 // CHECK15-NEXT:    [[SUB47:%.*]] = sub nsw i32 [[TMP37]], 0
15708 // CHECK15-NEXT:    [[DIV48:%.*]] = sdiv i32 [[SUB47]], 1
15709 // CHECK15-NEXT:    [[SUB49:%.*]] = sub nsw i32 [[DIV48]], 1
15710 // CHECK15-NEXT:    store i32 [[SUB49]], i32* [[DOTCAPTURE_EXPR_46]], align 4
15711 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB50]], align 4
15712 // CHECK15-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
15713 // CHECK15-NEXT:    store i32 [[TMP38]], i32* [[DOTOMP_UB51]], align 4
15714 // CHECK15-NEXT:    store i32 0, i32* [[I52]], align 4
15715 // CHECK15-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
15716 // CHECK15-NEXT:    [[CMP53:%.*]] = icmp slt i32 0, [[TMP39]]
15717 // CHECK15-NEXT:    br i1 [[CMP53]], label [[SIMD_IF_THEN54:%.*]], label [[SIMD_IF_END74:%.*]]
15718 // CHECK15:       simd.if.then54:
15719 // CHECK15-NEXT:    [[TMP40:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
15720 // CHECK15-NEXT:    store i32 [[TMP40]], i32* [[DOTOMP_IV55]], align 4
15721 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND57:%.*]]
15722 // CHECK15:       omp.inner.for.cond57:
15723 // CHECK15-NEXT:    [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
15724 // CHECK15-NEXT:    [[TMP42:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group !31
15725 // CHECK15-NEXT:    [[CMP58:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
15726 // CHECK15-NEXT:    br i1 [[CMP58]], label [[OMP_INNER_FOR_BODY59:%.*]], label [[OMP_INNER_FOR_END69:%.*]]
15727 // CHECK15:       omp.inner.for.body59:
15728 // CHECK15-NEXT:    [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
15729 // CHECK15-NEXT:    [[MUL60:%.*]] = mul nsw i32 [[TMP43]], 1
15730 // CHECK15-NEXT:    [[ADD61:%.*]] = add nsw i32 0, [[MUL60]]
15731 // CHECK15-NEXT:    store i32 [[ADD61]], i32* [[I56]], align 4, !llvm.access.group !31
15732 // CHECK15-NEXT:    [[TMP44:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !31
15733 // CHECK15-NEXT:    [[TMP45:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
15734 // CHECK15-NEXT:    [[ARRAYIDX62:%.*]] = getelementptr inbounds i32, i32* [[TMP44]], i32 [[TMP45]]
15735 // CHECK15-NEXT:    [[TMP46:%.*]] = load i32, i32* [[ARRAYIDX62]], align 4, !llvm.access.group !31
15736 // CHECK15-NEXT:    [[TMP47:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !31
15737 // CHECK15-NEXT:    [[TMP48:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
15738 // CHECK15-NEXT:    [[ARRAYIDX63:%.*]] = getelementptr inbounds i32, i32* [[TMP47]], i32 [[TMP48]]
15739 // CHECK15-NEXT:    [[TMP49:%.*]] = load i32, i32* [[ARRAYIDX63]], align 4, !llvm.access.group !31
15740 // CHECK15-NEXT:    [[ADD64:%.*]] = add nsw i32 [[TMP46]], [[TMP49]]
15741 // CHECK15-NEXT:    [[TMP50:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !31
15742 // CHECK15-NEXT:    [[TMP51:%.*]] = load i32, i32* [[I56]], align 4, !llvm.access.group !31
15743 // CHECK15-NEXT:    [[ARRAYIDX65:%.*]] = getelementptr inbounds i32, i32* [[TMP50]], i32 [[TMP51]]
15744 // CHECK15-NEXT:    store i32 [[ADD64]], i32* [[ARRAYIDX65]], align 4, !llvm.access.group !31
15745 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE66:%.*]]
15746 // CHECK15:       omp.body.continue66:
15747 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC67:%.*]]
15748 // CHECK15:       omp.inner.for.inc67:
15749 // CHECK15-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
15750 // CHECK15-NEXT:    [[ADD68:%.*]] = add nsw i32 [[TMP52]], 1
15751 // CHECK15-NEXT:    store i32 [[ADD68]], i32* [[DOTOMP_IV55]], align 4, !llvm.access.group !31
15752 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND57]], !llvm.loop [[LOOP32:![0-9]+]]
15753 // CHECK15:       omp.inner.for.end69:
15754 // CHECK15-NEXT:    [[TMP53:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_45]], align 4
15755 // CHECK15-NEXT:    [[SUB70:%.*]] = sub nsw i32 [[TMP53]], 0
15756 // CHECK15-NEXT:    [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
15757 // CHECK15-NEXT:    [[MUL72:%.*]] = mul nsw i32 [[DIV71]], 1
15758 // CHECK15-NEXT:    [[ADD73:%.*]] = add nsw i32 0, [[MUL72]]
15759 // CHECK15-NEXT:    store i32 [[ADD73]], i32* [[I56]], align 4
15760 // CHECK15-NEXT:    br label [[SIMD_IF_END74]]
15761 // CHECK15:       simd.if.end74:
15762 // CHECK15-NEXT:    [[TMP54:%.*]] = load i32, i32* [[N]], align 4
15763 // CHECK15-NEXT:    store i32 [[TMP54]], i32* [[DOTCAPTURE_EXPR_76]], align 4
15764 // CHECK15-NEXT:    [[TMP55:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
15765 // CHECK15-NEXT:    [[SUB78:%.*]] = sub nsw i32 [[TMP55]], 0
15766 // CHECK15-NEXT:    [[DIV79:%.*]] = sdiv i32 [[SUB78]], 1
15767 // CHECK15-NEXT:    [[SUB80:%.*]] = sub nsw i32 [[DIV79]], 1
15768 // CHECK15-NEXT:    store i32 [[SUB80]], i32* [[DOTCAPTURE_EXPR_77]], align 4
15769 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB81]], align 4
15770 // CHECK15-NEXT:    [[TMP56:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_77]], align 4
15771 // CHECK15-NEXT:    store i32 [[TMP56]], i32* [[DOTOMP_UB82]], align 4
15772 // CHECK15-NEXT:    store i32 0, i32* [[I83]], align 4
15773 // CHECK15-NEXT:    [[TMP57:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
15774 // CHECK15-NEXT:    [[CMP84:%.*]] = icmp slt i32 0, [[TMP57]]
15775 // CHECK15-NEXT:    br i1 [[CMP84]], label [[SIMD_IF_THEN85:%.*]], label [[SIMD_IF_END105:%.*]]
15776 // CHECK15:       simd.if.then85:
15777 // CHECK15-NEXT:    [[TMP58:%.*]] = load i32, i32* [[DOTOMP_LB81]], align 4
15778 // CHECK15-NEXT:    store i32 [[TMP58]], i32* [[DOTOMP_IV86]], align 4
15779 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND88:%.*]]
15780 // CHECK15:       omp.inner.for.cond88:
15781 // CHECK15-NEXT:    [[TMP59:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
15782 // CHECK15-NEXT:    [[TMP60:%.*]] = load i32, i32* [[DOTOMP_UB82]], align 4, !llvm.access.group !34
15783 // CHECK15-NEXT:    [[CMP89:%.*]] = icmp sle i32 [[TMP59]], [[TMP60]]
15784 // CHECK15-NEXT:    br i1 [[CMP89]], label [[OMP_INNER_FOR_BODY90:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
15785 // CHECK15:       omp.inner.for.body90:
15786 // CHECK15-NEXT:    [[TMP61:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
15787 // CHECK15-NEXT:    [[MUL91:%.*]] = mul nsw i32 [[TMP61]], 1
15788 // CHECK15-NEXT:    [[ADD92:%.*]] = add nsw i32 0, [[MUL91]]
15789 // CHECK15-NEXT:    store i32 [[ADD92]], i32* [[I87]], align 4, !llvm.access.group !34
15790 // CHECK15-NEXT:    [[TMP62:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !34
15791 // CHECK15-NEXT:    [[TMP63:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
15792 // CHECK15-NEXT:    [[ARRAYIDX93:%.*]] = getelementptr inbounds i32, i32* [[TMP62]], i32 [[TMP63]]
15793 // CHECK15-NEXT:    [[TMP64:%.*]] = load i32, i32* [[ARRAYIDX93]], align 4, !llvm.access.group !34
15794 // CHECK15-NEXT:    [[TMP65:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !34
15795 // CHECK15-NEXT:    [[TMP66:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
15796 // CHECK15-NEXT:    [[ARRAYIDX94:%.*]] = getelementptr inbounds i32, i32* [[TMP65]], i32 [[TMP66]]
15797 // CHECK15-NEXT:    [[TMP67:%.*]] = load i32, i32* [[ARRAYIDX94]], align 4, !llvm.access.group !34
15798 // CHECK15-NEXT:    [[ADD95:%.*]] = add nsw i32 [[TMP64]], [[TMP67]]
15799 // CHECK15-NEXT:    [[TMP68:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !34
15800 // CHECK15-NEXT:    [[TMP69:%.*]] = load i32, i32* [[I87]], align 4, !llvm.access.group !34
15801 // CHECK15-NEXT:    [[ARRAYIDX96:%.*]] = getelementptr inbounds i32, i32* [[TMP68]], i32 [[TMP69]]
15802 // CHECK15-NEXT:    store i32 [[ADD95]], i32* [[ARRAYIDX96]], align 4, !llvm.access.group !34
15803 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE97:%.*]]
15804 // CHECK15:       omp.body.continue97:
15805 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC98:%.*]]
15806 // CHECK15:       omp.inner.for.inc98:
15807 // CHECK15-NEXT:    [[TMP70:%.*]] = load i32, i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
15808 // CHECK15-NEXT:    [[ADD99:%.*]] = add nsw i32 [[TMP70]], 1
15809 // CHECK15-NEXT:    store i32 [[ADD99]], i32* [[DOTOMP_IV86]], align 4, !llvm.access.group !34
15810 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND88]], !llvm.loop [[LOOP35:![0-9]+]]
15811 // CHECK15:       omp.inner.for.end100:
15812 // CHECK15-NEXT:    [[TMP71:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_76]], align 4
15813 // CHECK15-NEXT:    [[SUB101:%.*]] = sub nsw i32 [[TMP71]], 0
15814 // CHECK15-NEXT:    [[DIV102:%.*]] = sdiv i32 [[SUB101]], 1
15815 // CHECK15-NEXT:    [[MUL103:%.*]] = mul nsw i32 [[DIV102]], 1
15816 // CHECK15-NEXT:    [[ADD104:%.*]] = add nsw i32 0, [[MUL103]]
15817 // CHECK15-NEXT:    store i32 [[ADD104]], i32* [[I87]], align 4
15818 // CHECK15-NEXT:    br label [[SIMD_IF_END105]]
15819 // CHECK15:       simd.if.end105:
15820 // CHECK15-NEXT:    [[TMP72:%.*]] = load i32, i32* [[CH]], align 4
15821 // CHECK15-NEXT:    store i32 [[TMP72]], i32* [[DOTCAPTURE_EXPR_106]], align 4
15822 // CHECK15-NEXT:    [[TMP73:%.*]] = load i32, i32* [[N]], align 4
15823 // CHECK15-NEXT:    store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_108]], align 4
15824 // CHECK15-NEXT:    [[TMP74:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
15825 // CHECK15-NEXT:    [[SUB110:%.*]] = sub nsw i32 [[TMP74]], 0
15826 // CHECK15-NEXT:    [[DIV111:%.*]] = sdiv i32 [[SUB110]], 1
15827 // CHECK15-NEXT:    [[SUB112:%.*]] = sub nsw i32 [[DIV111]], 1
15828 // CHECK15-NEXT:    store i32 [[SUB112]], i32* [[DOTCAPTURE_EXPR_109]], align 4
15829 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB113]], align 4
15830 // CHECK15-NEXT:    [[TMP75:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_109]], align 4
15831 // CHECK15-NEXT:    store i32 [[TMP75]], i32* [[DOTOMP_UB114]], align 4
15832 // CHECK15-NEXT:    store i32 0, i32* [[I115]], align 4
15833 // CHECK15-NEXT:    [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
15834 // CHECK15-NEXT:    [[CMP116:%.*]] = icmp slt i32 0, [[TMP76]]
15835 // CHECK15-NEXT:    br i1 [[CMP116]], label [[SIMD_IF_THEN117:%.*]], label [[SIMD_IF_END137:%.*]]
15836 // CHECK15:       simd.if.then117:
15837 // CHECK15-NEXT:    [[TMP77:%.*]] = load i32, i32* [[DOTOMP_LB113]], align 4
15838 // CHECK15-NEXT:    store i32 [[TMP77]], i32* [[DOTOMP_IV118]], align 4
15839 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND120:%.*]]
15840 // CHECK15:       omp.inner.for.cond120:
15841 // CHECK15-NEXT:    [[TMP78:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
15842 // CHECK15-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTOMP_UB114]], align 4, !llvm.access.group !37
15843 // CHECK15-NEXT:    [[CMP121:%.*]] = icmp sle i32 [[TMP78]], [[TMP79]]
15844 // CHECK15-NEXT:    br i1 [[CMP121]], label [[OMP_INNER_FOR_BODY122:%.*]], label [[OMP_INNER_FOR_END132:%.*]]
15845 // CHECK15:       omp.inner.for.body122:
15846 // CHECK15-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
15847 // CHECK15-NEXT:    [[MUL123:%.*]] = mul nsw i32 [[TMP80]], 1
15848 // CHECK15-NEXT:    [[ADD124:%.*]] = add nsw i32 0, [[MUL123]]
15849 // CHECK15-NEXT:    store i32 [[ADD124]], i32* [[I119]], align 4, !llvm.access.group !37
15850 // CHECK15-NEXT:    [[TMP81:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !37
15851 // CHECK15-NEXT:    [[TMP82:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
15852 // CHECK15-NEXT:    [[ARRAYIDX125:%.*]] = getelementptr inbounds i32, i32* [[TMP81]], i32 [[TMP82]]
15853 // CHECK15-NEXT:    [[TMP83:%.*]] = load i32, i32* [[ARRAYIDX125]], align 4, !llvm.access.group !37
15854 // CHECK15-NEXT:    [[TMP84:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !37
15855 // CHECK15-NEXT:    [[TMP85:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
15856 // CHECK15-NEXT:    [[ARRAYIDX126:%.*]] = getelementptr inbounds i32, i32* [[TMP84]], i32 [[TMP85]]
15857 // CHECK15-NEXT:    [[TMP86:%.*]] = load i32, i32* [[ARRAYIDX126]], align 4, !llvm.access.group !37
15858 // CHECK15-NEXT:    [[ADD127:%.*]] = add nsw i32 [[TMP83]], [[TMP86]]
15859 // CHECK15-NEXT:    [[TMP87:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !37
15860 // CHECK15-NEXT:    [[TMP88:%.*]] = load i32, i32* [[I119]], align 4, !llvm.access.group !37
15861 // CHECK15-NEXT:    [[ARRAYIDX128:%.*]] = getelementptr inbounds i32, i32* [[TMP87]], i32 [[TMP88]]
15862 // CHECK15-NEXT:    store i32 [[ADD127]], i32* [[ARRAYIDX128]], align 4, !llvm.access.group !37
15863 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE129:%.*]]
15864 // CHECK15:       omp.body.continue129:
15865 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC130:%.*]]
15866 // CHECK15:       omp.inner.for.inc130:
15867 // CHECK15-NEXT:    [[TMP89:%.*]] = load i32, i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
15868 // CHECK15-NEXT:    [[ADD131:%.*]] = add nsw i32 [[TMP89]], 1
15869 // CHECK15-NEXT:    store i32 [[ADD131]], i32* [[DOTOMP_IV118]], align 4, !llvm.access.group !37
15870 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND120]], !llvm.loop [[LOOP38:![0-9]+]]
15871 // CHECK15:       omp.inner.for.end132:
15872 // CHECK15-NEXT:    [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_108]], align 4
15873 // CHECK15-NEXT:    [[SUB133:%.*]] = sub nsw i32 [[TMP90]], 0
15874 // CHECK15-NEXT:    [[DIV134:%.*]] = sdiv i32 [[SUB133]], 1
15875 // CHECK15-NEXT:    [[MUL135:%.*]] = mul nsw i32 [[DIV134]], 1
15876 // CHECK15-NEXT:    [[ADD136:%.*]] = add nsw i32 0, [[MUL135]]
15877 // CHECK15-NEXT:    store i32 [[ADD136]], i32* [[I119]], align 4
15878 // CHECK15-NEXT:    br label [[SIMD_IF_END137]]
15879 // CHECK15:       simd.if.end137:
15880 // CHECK15-NEXT:    [[TMP91:%.*]] = load i32, i32* [[N]], align 4
15881 // CHECK15-NEXT:    store i32 [[TMP91]], i32* [[DOTCAPTURE_EXPR_139]], align 4
15882 // CHECK15-NEXT:    [[TMP92:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
15883 // CHECK15-NEXT:    [[SUB141:%.*]] = sub nsw i32 [[TMP92]], 0
15884 // CHECK15-NEXT:    [[DIV142:%.*]] = sdiv i32 [[SUB141]], 1
15885 // CHECK15-NEXT:    [[SUB143:%.*]] = sub nsw i32 [[DIV142]], 1
15886 // CHECK15-NEXT:    store i32 [[SUB143]], i32* [[DOTCAPTURE_EXPR_140]], align 4
15887 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB144]], align 4
15888 // CHECK15-NEXT:    [[TMP93:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_140]], align 4
15889 // CHECK15-NEXT:    store i32 [[TMP93]], i32* [[DOTOMP_UB145]], align 4
15890 // CHECK15-NEXT:    store i32 0, i32* [[I146]], align 4
15891 // CHECK15-NEXT:    [[TMP94:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
15892 // CHECK15-NEXT:    [[CMP147:%.*]] = icmp slt i32 0, [[TMP94]]
15893 // CHECK15-NEXT:    br i1 [[CMP147]], label [[SIMD_IF_THEN148:%.*]], label [[SIMD_IF_END168:%.*]]
15894 // CHECK15:       simd.if.then148:
15895 // CHECK15-NEXT:    [[TMP95:%.*]] = load i32, i32* [[DOTOMP_LB144]], align 4
15896 // CHECK15-NEXT:    store i32 [[TMP95]], i32* [[DOTOMP_IV149]], align 4
15897 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND151:%.*]]
15898 // CHECK15:       omp.inner.for.cond151:
15899 // CHECK15-NEXT:    [[TMP96:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
15900 // CHECK15-NEXT:    [[TMP97:%.*]] = load i32, i32* [[DOTOMP_UB145]], align 4, !llvm.access.group !40
15901 // CHECK15-NEXT:    [[CMP152:%.*]] = icmp sle i32 [[TMP96]], [[TMP97]]
15902 // CHECK15-NEXT:    br i1 [[CMP152]], label [[OMP_INNER_FOR_BODY153:%.*]], label [[OMP_INNER_FOR_END163:%.*]]
15903 // CHECK15:       omp.inner.for.body153:
15904 // CHECK15-NEXT:    [[TMP98:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
15905 // CHECK15-NEXT:    [[MUL154:%.*]] = mul nsw i32 [[TMP98]], 1
15906 // CHECK15-NEXT:    [[ADD155:%.*]] = add nsw i32 0, [[MUL154]]
15907 // CHECK15-NEXT:    store i32 [[ADD155]], i32* [[I150]], align 4, !llvm.access.group !40
15908 // CHECK15-NEXT:    [[TMP99:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !40
15909 // CHECK15-NEXT:    [[TMP100:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
15910 // CHECK15-NEXT:    [[ARRAYIDX156:%.*]] = getelementptr inbounds i32, i32* [[TMP99]], i32 [[TMP100]]
15911 // CHECK15-NEXT:    [[TMP101:%.*]] = load i32, i32* [[ARRAYIDX156]], align 4, !llvm.access.group !40
15912 // CHECK15-NEXT:    [[TMP102:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !40
15913 // CHECK15-NEXT:    [[TMP103:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
15914 // CHECK15-NEXT:    [[ARRAYIDX157:%.*]] = getelementptr inbounds i32, i32* [[TMP102]], i32 [[TMP103]]
15915 // CHECK15-NEXT:    [[TMP104:%.*]] = load i32, i32* [[ARRAYIDX157]], align 4, !llvm.access.group !40
15916 // CHECK15-NEXT:    [[ADD158:%.*]] = add nsw i32 [[TMP101]], [[TMP104]]
15917 // CHECK15-NEXT:    [[TMP105:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !40
15918 // CHECK15-NEXT:    [[TMP106:%.*]] = load i32, i32* [[I150]], align 4, !llvm.access.group !40
15919 // CHECK15-NEXT:    [[ARRAYIDX159:%.*]] = getelementptr inbounds i32, i32* [[TMP105]], i32 [[TMP106]]
15920 // CHECK15-NEXT:    store i32 [[ADD158]], i32* [[ARRAYIDX159]], align 4, !llvm.access.group !40
15921 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE160:%.*]]
15922 // CHECK15:       omp.body.continue160:
15923 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC161:%.*]]
15924 // CHECK15:       omp.inner.for.inc161:
15925 // CHECK15-NEXT:    [[TMP107:%.*]] = load i32, i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
15926 // CHECK15-NEXT:    [[ADD162:%.*]] = add nsw i32 [[TMP107]], 1
15927 // CHECK15-NEXT:    store i32 [[ADD162]], i32* [[DOTOMP_IV149]], align 4, !llvm.access.group !40
15928 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND151]], !llvm.loop [[LOOP41:![0-9]+]]
15929 // CHECK15:       omp.inner.for.end163:
15930 // CHECK15-NEXT:    [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_139]], align 4
15931 // CHECK15-NEXT:    [[SUB164:%.*]] = sub nsw i32 [[TMP108]], 0
15932 // CHECK15-NEXT:    [[DIV165:%.*]] = sdiv i32 [[SUB164]], 1
15933 // CHECK15-NEXT:    [[MUL166:%.*]] = mul nsw i32 [[DIV165]], 1
15934 // CHECK15-NEXT:    [[ADD167:%.*]] = add nsw i32 0, [[MUL166]]
15935 // CHECK15-NEXT:    store i32 [[ADD167]], i32* [[I150]], align 4
15936 // CHECK15-NEXT:    br label [[SIMD_IF_END168]]
15937 // CHECK15:       simd.if.end168:
15938 // CHECK15-NEXT:    [[TMP109:%.*]] = load i32, i32* [[CH]], align 4
15939 // CHECK15-NEXT:    store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_169]], align 4
15940 // CHECK15-NEXT:    [[TMP110:%.*]] = load i32, i32* [[N]], align 4
15941 // CHECK15-NEXT:    store i32 [[TMP110]], i32* [[DOTCAPTURE_EXPR_171]], align 4
15942 // CHECK15-NEXT:    [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
15943 // CHECK15-NEXT:    [[SUB173:%.*]] = sub nsw i32 [[TMP111]], 0
15944 // CHECK15-NEXT:    [[DIV174:%.*]] = sdiv i32 [[SUB173]], 1
15945 // CHECK15-NEXT:    [[SUB175:%.*]] = sub nsw i32 [[DIV174]], 1
15946 // CHECK15-NEXT:    store i32 [[SUB175]], i32* [[DOTCAPTURE_EXPR_172]], align 4
15947 // CHECK15-NEXT:    store i32 0, i32* [[DOTOMP_LB176]], align 4
15948 // CHECK15-NEXT:    [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_172]], align 4
15949 // CHECK15-NEXT:    store i32 [[TMP112]], i32* [[DOTOMP_UB177]], align 4
15950 // CHECK15-NEXT:    store i32 0, i32* [[I178]], align 4
15951 // CHECK15-NEXT:    [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
15952 // CHECK15-NEXT:    [[CMP179:%.*]] = icmp slt i32 0, [[TMP113]]
15953 // CHECK15-NEXT:    br i1 [[CMP179]], label [[SIMD_IF_THEN180:%.*]], label [[SIMD_IF_END200:%.*]]
15954 // CHECK15:       simd.if.then180:
15955 // CHECK15-NEXT:    [[TMP114:%.*]] = load i32, i32* [[DOTOMP_LB176]], align 4
15956 // CHECK15-NEXT:    store i32 [[TMP114]], i32* [[DOTOMP_IV181]], align 4
15957 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND183:%.*]]
15958 // CHECK15:       omp.inner.for.cond183:
15959 // CHECK15-NEXT:    [[TMP115:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
15960 // CHECK15-NEXT:    [[TMP116:%.*]] = load i32, i32* [[DOTOMP_UB177]], align 4, !llvm.access.group !43
15961 // CHECK15-NEXT:    [[CMP184:%.*]] = icmp sle i32 [[TMP115]], [[TMP116]]
15962 // CHECK15-NEXT:    br i1 [[CMP184]], label [[OMP_INNER_FOR_BODY185:%.*]], label [[OMP_INNER_FOR_END195:%.*]]
15963 // CHECK15:       omp.inner.for.body185:
15964 // CHECK15-NEXT:    [[TMP117:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
15965 // CHECK15-NEXT:    [[MUL186:%.*]] = mul nsw i32 [[TMP117]], 1
15966 // CHECK15-NEXT:    [[ADD187:%.*]] = add nsw i32 0, [[MUL186]]
15967 // CHECK15-NEXT:    store i32 [[ADD187]], i32* [[I182]], align 4, !llvm.access.group !43
15968 // CHECK15-NEXT:    [[TMP118:%.*]] = load i32*, i32** [[B]], align 4, !llvm.access.group !43
15969 // CHECK15-NEXT:    [[TMP119:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
15970 // CHECK15-NEXT:    [[ARRAYIDX188:%.*]] = getelementptr inbounds i32, i32* [[TMP118]], i32 [[TMP119]]
15971 // CHECK15-NEXT:    [[TMP120:%.*]] = load i32, i32* [[ARRAYIDX188]], align 4, !llvm.access.group !43
15972 // CHECK15-NEXT:    [[TMP121:%.*]] = load i32*, i32** [[C]], align 4, !llvm.access.group !43
15973 // CHECK15-NEXT:    [[TMP122:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
15974 // CHECK15-NEXT:    [[ARRAYIDX189:%.*]] = getelementptr inbounds i32, i32* [[TMP121]], i32 [[TMP122]]
15975 // CHECK15-NEXT:    [[TMP123:%.*]] = load i32, i32* [[ARRAYIDX189]], align 4, !llvm.access.group !43
15976 // CHECK15-NEXT:    [[ADD190:%.*]] = add nsw i32 [[TMP120]], [[TMP123]]
15977 // CHECK15-NEXT:    [[TMP124:%.*]] = load i32*, i32** [[A]], align 4, !llvm.access.group !43
15978 // CHECK15-NEXT:    [[TMP125:%.*]] = load i32, i32* [[I182]], align 4, !llvm.access.group !43
15979 // CHECK15-NEXT:    [[ARRAYIDX191:%.*]] = getelementptr inbounds i32, i32* [[TMP124]], i32 [[TMP125]]
15980 // CHECK15-NEXT:    store i32 [[ADD190]], i32* [[ARRAYIDX191]], align 4, !llvm.access.group !43
15981 // CHECK15-NEXT:    br label [[OMP_BODY_CONTINUE192:%.*]]
15982 // CHECK15:       omp.body.continue192:
15983 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_INC193:%.*]]
15984 // CHECK15:       omp.inner.for.inc193:
15985 // CHECK15-NEXT:    [[TMP126:%.*]] = load i32, i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
15986 // CHECK15-NEXT:    [[ADD194:%.*]] = add nsw i32 [[TMP126]], 1
15987 // CHECK15-NEXT:    store i32 [[ADD194]], i32* [[DOTOMP_IV181]], align 4, !llvm.access.group !43
15988 // CHECK15-NEXT:    br label [[OMP_INNER_FOR_COND183]], !llvm.loop [[LOOP44:![0-9]+]]
15989 // CHECK15:       omp.inner.for.end195:
15990 // CHECK15-NEXT:    [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_171]], align 4
15991 // CHECK15-NEXT:    [[SUB196:%.*]] = sub nsw i32 [[TMP127]], 0
15992 // CHECK15-NEXT:    [[DIV197:%.*]] = sdiv i32 [[SUB196]], 1
15993 // CHECK15-NEXT:    [[MUL198:%.*]] = mul nsw i32 [[DIV197]], 1
15994 // CHECK15-NEXT:    [[ADD199:%.*]] = add nsw i32 0, [[MUL198]]
15995 // CHECK15-NEXT:    store i32 [[ADD199]], i32* [[I182]], align 4
15996 // CHECK15-NEXT:    br label [[SIMD_IF_END200]]
15997 // CHECK15:       simd.if.end200:
15998 // CHECK15-NEXT:    ret i32 0
15999 //
16000